scsi_transport_fc.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Fibre Channel transport specific attributes exported to sysfs.
  4. *
  5. * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
  6. * Copyright (C) 2004-2007 James Smart, Emulex Corporation
  7. * Rewrite for host, target, device, and remote port attributes,
  8. * statistics, and service functions...
  9. * Add vports, etc
  10. */
  11. #include <linux/module.h>
  12. #include <linux/init.h>
  13. #include <linux/slab.h>
  14. #include <linux/delay.h>
  15. #include <linux/kernel.h>
  16. #include <linux/bsg-lib.h>
  17. #include <scsi/scsi_device.h>
  18. #include <scsi/scsi_host.h>
  19. #include <scsi/scsi_transport.h>
  20. #include <scsi/scsi_transport_fc.h>
  21. #include <scsi/scsi_cmnd.h>
  22. #include <net/netlink.h>
  23. #include <scsi/scsi_netlink_fc.h>
  24. #include <scsi/scsi_bsg_fc.h>
  25. #include <uapi/scsi/fc/fc_els.h>
  26. #include "scsi_priv.h"
  27. static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
  28. static void fc_vport_sched_delete(struct work_struct *work);
  29. static int fc_vport_setup(struct Scsi_Host *shost, int channel,
  30. struct device *pdev, struct fc_vport_identifiers *ids,
  31. struct fc_vport **vport);
  32. static int fc_bsg_hostadd(struct Scsi_Host *, struct fc_host_attrs *);
  33. static int fc_bsg_rportadd(struct Scsi_Host *, struct fc_rport *);
  34. static void fc_bsg_remove(struct request_queue *);
  35. static void fc_bsg_goose_queue(struct fc_rport *);
  36. static void fc_li_stats_update(u16 event_type,
  37. struct fc_fpin_stats *stats);
  38. static void fc_delivery_stats_update(u32 reason_code,
  39. struct fc_fpin_stats *stats);
  40. static void fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats);
  41. /*
  42. * Module Parameters
  43. */
  44. /*
  45. * dev_loss_tmo: the default number of seconds that the FC transport
  46. * should insulate the loss of a remote port.
  47. * The maximum will be capped by the value of SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
  48. */
  49. static unsigned int fc_dev_loss_tmo = 60; /* seconds */
  50. module_param_named(dev_loss_tmo, fc_dev_loss_tmo, uint, S_IRUGO|S_IWUSR);
  51. MODULE_PARM_DESC(dev_loss_tmo,
  52. "Maximum number of seconds that the FC transport should"
  53. " insulate the loss of a remote port. Once this value is"
  54. " exceeded, the scsi target is removed. Value should be"
  55. " between 1 and SCSI_DEVICE_BLOCK_MAX_TIMEOUT if"
  56. " fast_io_fail_tmo is not set.");
  57. /*
  58. * Redefine so that we can have same named attributes in the
  59. * sdev/starget/host objects.
  60. */
  61. #define FC_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \
  62. struct device_attribute device_attr_##_prefix##_##_name = \
  63. __ATTR(_name,_mode,_show,_store)
  64. #define fc_enum_name_search(title, table_type, table) \
  65. static const char *get_fc_##title##_name(enum table_type table_key) \
  66. { \
  67. int i; \
  68. char *name = NULL; \
  69. \
  70. for (i = 0; i < ARRAY_SIZE(table); i++) { \
  71. if (table[i].value == table_key) { \
  72. name = table[i].name; \
  73. break; \
  74. } \
  75. } \
  76. return name; \
  77. }
  78. #define fc_enum_name_match(title, table_type, table) \
  79. static int get_fc_##title##_match(const char *table_key, \
  80. enum table_type *value) \
  81. { \
  82. int i; \
  83. \
  84. for (i = 0; i < ARRAY_SIZE(table); i++) { \
  85. if (strncmp(table_key, table[i].name, \
  86. table[i].matchlen) == 0) { \
  87. *value = table[i].value; \
  88. return 0; /* success */ \
  89. } \
  90. } \
  91. return 1; /* failure */ \
  92. }
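/*
 * Usage sketch (annotation, not original source): the two generator macros
 * above are instantiated with a lookup table, e.g.
 *
 *   fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
 *
 * expands to get_fc_port_type_name(), which returns the display string for
 * an enum fc_port_type value, or NULL if the value is not in the table.
 * fc_enum_name_match() generates the reverse mapping, comparing only the
 * first 'matchlen' characters of the user-supplied string.
 */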
  93. /* Convert fc_port_type values to ascii string name */
  94. static struct {
  95. enum fc_port_type value;
  96. char *name;
  97. } fc_port_type_names[] = {
  98. { FC_PORTTYPE_UNKNOWN, "Unknown" },
  99. { FC_PORTTYPE_OTHER, "Other" },
  100. { FC_PORTTYPE_NOTPRESENT, "Not Present" },
  101. { FC_PORTTYPE_NPORT, "NPort (fabric via point-to-point)" },
  102. { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
  103. { FC_PORTTYPE_LPORT, "LPort (private loop)" },
  104. { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection)" },
  105. { FC_PORTTYPE_NPIV, "NPIV VPORT" },
  106. };
  107. fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
  108. #define FC_PORTTYPE_MAX_NAMELEN 50
  109. /* Reuse fc_port_type enum function for vport_type */
  110. #define get_fc_vport_type_name get_fc_port_type_name
  111. /* Convert fc_host_event_code values to ascii string name */
  112. static const struct {
  113. enum fc_host_event_code value;
  114. char *name;
  115. } fc_host_event_code_names[] = {
  116. { FCH_EVT_LIP, "lip" },
  117. { FCH_EVT_LINKUP, "link_up" },
  118. { FCH_EVT_LINKDOWN, "link_down" },
  119. { FCH_EVT_LIPRESET, "lip_reset" },
  120. { FCH_EVT_RSCN, "rscn" },
  121. { FCH_EVT_ADAPTER_CHANGE, "adapter_chg" },
  122. { FCH_EVT_PORT_UNKNOWN, "port_unknown" },
  123. { FCH_EVT_PORT_ONLINE, "port_online" },
  124. { FCH_EVT_PORT_OFFLINE, "port_offline" },
  125. { FCH_EVT_PORT_FABRIC, "port_fabric" },
  126. { FCH_EVT_LINK_UNKNOWN, "link_unknown" },
  127. { FCH_EVT_LINK_FPIN, "link_FPIN" },
  128. { FCH_EVT_VENDOR_UNIQUE, "vendor_unique" },
  129. };
  130. fc_enum_name_search(host_event_code, fc_host_event_code,
  131. fc_host_event_code_names)
  132. #define FC_HOST_EVENT_CODE_MAX_NAMELEN 30
  133. /* Convert fc_port_state values to ascii string name */
  134. static struct {
  135. enum fc_port_state value;
  136. char *name;
  137. int matchlen;
  138. } fc_port_state_names[] = {
  139. { FC_PORTSTATE_UNKNOWN, "Unknown", 7},
  140. { FC_PORTSTATE_NOTPRESENT, "Not Present", 11 },
  141. { FC_PORTSTATE_ONLINE, "Online", 6 },
  142. { FC_PORTSTATE_OFFLINE, "Offline", 7 },
  143. { FC_PORTSTATE_BLOCKED, "Blocked", 7 },
  144. { FC_PORTSTATE_BYPASSED, "Bypassed", 8 },
  145. { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics", 11 },
  146. { FC_PORTSTATE_LINKDOWN, "Linkdown", 8 },
  147. { FC_PORTSTATE_ERROR, "Error", 5 },
  148. { FC_PORTSTATE_LOOPBACK, "Loopback", 8 },
  149. { FC_PORTSTATE_DELETED, "Deleted", 7 },
  150. { FC_PORTSTATE_MARGINAL, "Marginal", 8 },
  151. };
  152. fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
  153. fc_enum_name_match(port_state, fc_port_state, fc_port_state_names)
  154. #define FC_PORTSTATE_MAX_NAMELEN 20
  155. /* Convert fc_vport_state values to ascii string name */
  156. static struct {
  157. enum fc_vport_state value;
  158. char *name;
  159. } fc_vport_state_names[] = {
  160. { FC_VPORT_UNKNOWN, "Unknown" },
  161. { FC_VPORT_ACTIVE, "Active" },
  162. { FC_VPORT_DISABLED, "Disabled" },
  163. { FC_VPORT_LINKDOWN, "Linkdown" },
  164. { FC_VPORT_INITIALIZING, "Initializing" },
  165. { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
  166. { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
  167. { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
  168. { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
  169. { FC_VPORT_FAILED, "VPort Failed" },
  170. };
  171. fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
  172. #define FC_VPORTSTATE_MAX_NAMELEN 24
  173. /* Reuse fc_vport_state enum function for vport_last_state */
  174. #define get_fc_vport_last_state_name get_fc_vport_state_name
  175. /* Convert fc_tgtid_binding_type values to ascii string name */
  176. static const struct {
  177. enum fc_tgtid_binding_type value;
  178. char *name;
  179. int matchlen;
  180. } fc_tgtid_binding_type_names[] = {
  181. { FC_TGTID_BIND_NONE, "none", 4 },
  182. { FC_TGTID_BIND_BY_WWPN, "wwpn (World Wide Port Name)", 4 },
  183. { FC_TGTID_BIND_BY_WWNN, "wwnn (World Wide Node Name)", 4 },
  184. { FC_TGTID_BIND_BY_ID, "port_id (FC Address)", 7 },
  185. };
  186. fc_enum_name_search(tgtid_bind_type, fc_tgtid_binding_type,
  187. fc_tgtid_binding_type_names)
  188. fc_enum_name_match(tgtid_bind_type, fc_tgtid_binding_type,
  189. fc_tgtid_binding_type_names)
  190. #define FC_BINDTYPE_MAX_NAMELEN 30
  191. #define fc_bitfield_name_search(title, table) \
  192. static ssize_t \
  193. get_fc_##title##_names(u32 table_key, char *buf) \
  194. { \
  195. char *prefix = ""; \
  196. ssize_t len = 0; \
  197. int i; \
  198. \
  199. for (i = 0; i < ARRAY_SIZE(table); i++) { \
  200. if (table[i].value & table_key) { \
  201. len += sprintf(buf + len, "%s%s", \
  202. prefix, table[i].name); \
  203. prefix = ", "; \
  204. } \
  205. } \
  206. len += sprintf(buf + len, "\n"); \
  207. return len; \
  208. }
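/*
 * Annotation: get_fc_##title##_names() walks the table and prints a
 * comma-separated list of the names whose bits are set in 'table_key'.
 * For example, get_fc_cos_names(FC_COS_CLASS2 | FC_COS_CLASS3, buf),
 * generated below, would emit "Class 2, Class 3\n".
 */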
  209. /* Convert FC_COS bit values to ascii string name */
  210. static const struct {
  211. u32 value;
  212. char *name;
  213. } fc_cos_names[] = {
  214. { FC_COS_CLASS1, "Class 1" },
  215. { FC_COS_CLASS2, "Class 2" },
  216. { FC_COS_CLASS3, "Class 3" },
  217. { FC_COS_CLASS4, "Class 4" },
  218. { FC_COS_CLASS6, "Class 6" },
  219. };
  220. fc_bitfield_name_search(cos, fc_cos_names)
  221. /* Convert FC_PORTSPEED bit values to ascii string name */
  222. static const struct {
  223. u32 value;
  224. char *name;
  225. } fc_port_speed_names[] = {
  226. { FC_PORTSPEED_1GBIT, "1 Gbit" },
  227. { FC_PORTSPEED_2GBIT, "2 Gbit" },
  228. { FC_PORTSPEED_4GBIT, "4 Gbit" },
  229. { FC_PORTSPEED_10GBIT, "10 Gbit" },
  230. { FC_PORTSPEED_8GBIT, "8 Gbit" },
  231. { FC_PORTSPEED_16GBIT, "16 Gbit" },
  232. { FC_PORTSPEED_32GBIT, "32 Gbit" },
  233. { FC_PORTSPEED_20GBIT, "20 Gbit" },
  234. { FC_PORTSPEED_40GBIT, "40 Gbit" },
  235. { FC_PORTSPEED_50GBIT, "50 Gbit" },
  236. { FC_PORTSPEED_100GBIT, "100 Gbit" },
  237. { FC_PORTSPEED_25GBIT, "25 Gbit" },
  238. { FC_PORTSPEED_64GBIT, "64 Gbit" },
  239. { FC_PORTSPEED_128GBIT, "128 Gbit" },
  240. { FC_PORTSPEED_256GBIT, "256 Gbit" },
  241. { FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
  242. };
  243. fc_bitfield_name_search(port_speed, fc_port_speed_names)
  244. static int
  245. show_fc_fc4s (char *buf, u8 *fc4_list)
  246. {
  247. int i, len=0;
  248. for (i = 0; i < FC_FC4_LIST_SIZE; i++, fc4_list++)
  249. len += sprintf(buf + len , "0x%02x ", *fc4_list);
  250. len += sprintf(buf + len, "\n");
  251. return len;
  252. }
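/*
 * Annotation: show_fc_fc4s() dumps an FC-4 capability bitmap one byte at a
 * time as "0x%02x " tokens followed by a newline, so the attributes built on
 * it read as a space-separated list of hex bytes.
 */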
  253. /* Convert FC_PORT_ROLE bit values to ascii string name */
  254. static const struct {
  255. u32 value;
  256. char *name;
  257. } fc_port_role_names[] = {
  258. { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
  259. { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
  260. { FC_PORT_ROLE_IP_PORT, "IP Port" },
  261. { FC_PORT_ROLE_FCP_DUMMY_INITIATOR, "FCP Dummy Initiator" },
  262. { FC_PORT_ROLE_NVME_INITIATOR, "NVMe Initiator" },
  263. { FC_PORT_ROLE_NVME_TARGET, "NVMe Target" },
  264. { FC_PORT_ROLE_NVME_DISCOVERY, "NVMe Discovery" },
  265. };
  266. fc_bitfield_name_search(port_roles, fc_port_role_names)
  267. /*
  268. * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
  269. */
  270. #define FC_WELLKNOWN_PORTID_MASK 0xfffff0
  271. #define FC_WELLKNOWN_ROLE_MASK 0x00000f
  272. #define FC_FPORT_PORTID 0x00000e
  273. #define FC_FABCTLR_PORTID 0x00000d
  274. #define FC_DIRSRVR_PORTID 0x00000c
  275. #define FC_TIMESRVR_PORTID 0x00000b
  276. #define FC_MGMTSRVR_PORTID 0x00000a
  277. static void fc_timeout_deleted_rport(struct work_struct *work);
  278. static void fc_timeout_fail_rport_io(struct work_struct *work);
  279. static void fc_scsi_scan_rport(struct work_struct *work);
  280. /*
  281. * Attribute counts per object type...
  282. * Increase these values if you add attributes
  283. */
  284. #define FC_STARGET_NUM_ATTRS 3
  285. #define FC_RPORT_NUM_ATTRS 10
  286. #define FC_VPORT_NUM_ATTRS 9
  287. #define FC_HOST_NUM_ATTRS 29
  288. struct fc_internal {
  289. struct scsi_transport_template t;
  290. struct fc_function_template *f;
  291. /*
  292. * For attributes : each object has :
  293. * An array of the actual attributes structures
  294. * An array of null-terminated pointers to the attribute
  295. * structures - used for mid-layer interaction.
  296. *
  297. * The attribute containers for the starget and host are
  298. * part of the midlayer. As the remote port is specific to the
  299. * fc transport, we must provide the attribute container.
  300. */
  301. struct device_attribute private_starget_attrs[
  302. FC_STARGET_NUM_ATTRS];
  303. struct device_attribute *starget_attrs[FC_STARGET_NUM_ATTRS + 1];
  304. struct device_attribute private_host_attrs[FC_HOST_NUM_ATTRS];
  305. struct device_attribute *host_attrs[FC_HOST_NUM_ATTRS + 1];
  306. struct transport_container rport_attr_cont;
  307. struct device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
  308. struct device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
  309. struct transport_container vport_attr_cont;
  310. struct device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
  311. struct device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
  312. };
  313. #define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
  314. static int fc_target_setup(struct transport_container *tc, struct device *dev,
  315. struct device *cdev)
  316. {
  317. struct scsi_target *starget = to_scsi_target(dev);
  318. struct fc_rport *rport = starget_to_rport(starget);
  319. /*
  320. * if parent is remote port, use values from remote port.
  321. * Otherwise, this host uses the fc_transport, but not the
  322. * remote port interface. As such, initialize to known non-values.
  323. */
  324. if (rport) {
  325. fc_starget_node_name(starget) = rport->node_name;
  326. fc_starget_port_name(starget) = rport->port_name;
  327. fc_starget_port_id(starget) = rport->port_id;
  328. } else {
  329. fc_starget_node_name(starget) = -1;
  330. fc_starget_port_name(starget) = -1;
  331. fc_starget_port_id(starget) = -1;
  332. }
  333. return 0;
  334. }
  335. static DECLARE_TRANSPORT_CLASS(fc_transport_class,
  336. "fc_transport",
  337. fc_target_setup,
  338. NULL,
  339. NULL);
  340. static int fc_host_setup(struct transport_container *tc, struct device *dev,
  341. struct device *cdev)
  342. {
  343. struct Scsi_Host *shost = dev_to_shost(dev);
  344. struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
  345. /*
  346. * Set default values easily detected by the midlayer as
  347. * failure cases. The scsi lldd is responsible for initializing
  348. * all transport attributes to valid values per host.
  349. */
  350. fc_host->node_name = -1;
  351. fc_host->port_name = -1;
  352. fc_host->permanent_port_name = -1;
  353. fc_host->supported_classes = FC_COS_UNSPECIFIED;
  354. memset(fc_host->supported_fc4s, 0,
  355. sizeof(fc_host->supported_fc4s));
  356. fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
  357. fc_host->maxframe_size = -1;
  358. fc_host->max_npiv_vports = 0;
  359. memset(fc_host->serial_number, 0,
  360. sizeof(fc_host->serial_number));
  361. memset(fc_host->manufacturer, 0,
  362. sizeof(fc_host->manufacturer));
  363. memset(fc_host->model, 0,
  364. sizeof(fc_host->model));
  365. memset(fc_host->model_description, 0,
  366. sizeof(fc_host->model_description));
  367. memset(fc_host->hardware_version, 0,
  368. sizeof(fc_host->hardware_version));
  369. memset(fc_host->driver_version, 0,
  370. sizeof(fc_host->driver_version));
  371. memset(fc_host->firmware_version, 0,
  372. sizeof(fc_host->firmware_version));
  373. memset(fc_host->optionrom_version, 0,
  374. sizeof(fc_host->optionrom_version));
  375. fc_host->port_id = -1;
  376. fc_host->port_type = FC_PORTTYPE_UNKNOWN;
  377. fc_host->port_state = FC_PORTSTATE_UNKNOWN;
  378. memset(fc_host->active_fc4s, 0,
  379. sizeof(fc_host->active_fc4s));
  380. fc_host->speed = FC_PORTSPEED_UNKNOWN;
  381. fc_host->fabric_name = -1;
  382. memset(fc_host->symbolic_name, 0, sizeof(fc_host->symbolic_name));
  383. memset(fc_host->system_hostname, 0, sizeof(fc_host->system_hostname));
  384. memset(&fc_host->fpin_stats, 0, sizeof(fc_host->fpin_stats));
  385. fc_host->tgtid_bind_type = FC_TGTID_BIND_BY_WWPN;
  386. INIT_LIST_HEAD(&fc_host->rports);
  387. INIT_LIST_HEAD(&fc_host->rport_bindings);
  388. INIT_LIST_HEAD(&fc_host->vports);
  389. fc_host->next_rport_number = 0;
  390. fc_host->next_target_id = 0;
  391. fc_host->next_vport_number = 0;
  392. fc_host->npiv_vports_inuse = 0;
  393. snprintf(fc_host->work_q_name, sizeof(fc_host->work_q_name),
  394. "fc_wq_%d", shost->host_no);
  395. fc_host->work_q = alloc_workqueue("%s", 0, 0, fc_host->work_q_name);
  396. if (!fc_host->work_q)
  397. return -ENOMEM;
  398. fc_host->dev_loss_tmo = fc_dev_loss_tmo;
  399. snprintf(fc_host->devloss_work_q_name,
  400. sizeof(fc_host->devloss_work_q_name),
  401. "fc_dl_%d", shost->host_no);
  402. fc_host->devloss_work_q = alloc_workqueue("%s", 0, 0,
  403. fc_host->devloss_work_q_name);
  404. if (!fc_host->devloss_work_q) {
  405. destroy_workqueue(fc_host->work_q);
  406. fc_host->work_q = NULL;
  407. return -ENOMEM;
  408. }
  409. fc_bsg_hostadd(shost, fc_host);
  410. /* ignore any bsg add error - we just can't do sgio */
  411. return 0;
  412. }
  413. static int fc_host_remove(struct transport_container *tc, struct device *dev,
  414. struct device *cdev)
  415. {
  416. struct Scsi_Host *shost = dev_to_shost(dev);
  417. struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
  418. fc_bsg_remove(fc_host->rqst_q);
  419. return 0;
  420. }
  421. static DECLARE_TRANSPORT_CLASS(fc_host_class,
  422. "fc_host",
  423. fc_host_setup,
  424. fc_host_remove,
  425. NULL);
  426. /*
  427. * Setup and Remove actions for remote ports are handled
  428. * in the service functions below.
  429. */
  430. static DECLARE_TRANSPORT_CLASS(fc_rport_class,
  431. "fc_remote_ports",
  432. NULL,
  433. NULL,
  434. NULL);
  435. /*
  436. * Setup and Remove actions for virtual ports are handled
  437. * in the service functions below.
  438. */
  439. static DECLARE_TRANSPORT_CLASS(fc_vport_class,
  440. "fc_vports",
  441. NULL,
  442. NULL,
  443. NULL);
  444. /*
  445. * Netlink Infrastructure
  446. */
  447. static atomic_t fc_event_seq;
  448. /**
  449. * fc_get_event_number - Obtain the next sequential FC event number
  450. *
  451. * Notes:
  452. * We could have inlined this, but it would have required fc_event_seq to
  453. * be exposed. For now, live with the subroutine call.
  454. * Atomic used to avoid lock/unlock...
  455. */
  456. u32
  457. fc_get_event_number(void)
  458. {
  459. return atomic_add_return(1, &fc_event_seq);
  460. }
  461. EXPORT_SYMBOL(fc_get_event_number);
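/*
 * Annotation: fc_event_seq is initialized to 0 in fc_transport_init(), so
 * the first call to fc_get_event_number() returns 1; the value is carried
 * in the netlink event as event_num so userspace can order and correlate
 * events.
 */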
  462. /**
  463. * fc_host_post_fc_event - routine to do the work of posting an event
  464. * on an fc_host.
  465. * @shost: host the event occurred on
  466. * @event_number: fc event number obtained from fc_get_event_number()
  467. * @event_code: fc_host event being posted
  468. * @data_len: amount, in bytes, of event data
  469. * @data_buf: pointer to event data
  470. * @vendor_id: value for Vendor id
  471. *
  472. * Notes:
  473. * This routine assumes no locks are held on entry.
  474. */
  475. void
  476. fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
  477. enum fc_host_event_code event_code,
  478. u32 data_len, char *data_buf, u64 vendor_id)
  479. {
  480. struct sk_buff *skb;
  481. struct nlmsghdr *nlh;
  482. struct fc_nl_event *event;
  483. const char *name;
  484. size_t len, padding;
  485. int err;
  486. if (!data_buf || data_len < 4)
  487. data_len = 0;
  488. if (!scsi_nl_sock) {
  489. err = -ENOENT;
  490. goto send_fail;
  491. }
  492. len = FC_NL_MSGALIGN(sizeof(*event) - sizeof(event->event_data) + data_len);
  493. skb = nlmsg_new(len, GFP_KERNEL);
  494. if (!skb) {
  495. err = -ENOBUFS;
  496. goto send_fail;
  497. }
  498. nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
  499. if (!nlh) {
  500. err = -ENOBUFS;
  501. goto send_fail_skb;
  502. }
  503. event = nlmsg_data(nlh);
  504. INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
  505. FC_NL_ASYNC_EVENT, len);
  506. event->seconds = ktime_get_real_seconds();
  507. event->vendor_id = vendor_id;
  508. event->host_no = shost->host_no;
  509. event->event_datalen = data_len; /* bytes */
  510. event->event_num = event_number;
  511. event->event_code = event_code;
  512. if (data_len)
  513. memcpy(event->event_data_flex, data_buf, data_len);
  514. padding = len - offsetof(typeof(*event), event_data_flex) - data_len;
  515. memset(event->event_data_flex + data_len, 0, padding);
  516. nlmsg_multicast(scsi_nl_sock, skb, 0, SCSI_NL_GRP_FC_EVENTS,
  517. GFP_KERNEL);
  518. return;
  519. send_fail_skb:
  520. kfree_skb(skb);
  521. send_fail:
  522. name = get_fc_host_event_code_name(event_code);
  523. printk(KERN_WARNING
  524. "%s: Dropped Event : host %d %s data 0x%08x - err %d\n",
  525. __func__, shost->host_no,
  526. (name) ? name : "<unknown>",
  527. (data_len) ? *((u32 *)data_buf) : 0xFFFFFFFF, err);
  528. return;
  529. }
  530. EXPORT_SYMBOL(fc_host_post_fc_event);
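/*
 * Annotation: the message length computed above covers the fixed part of
 * struct fc_nl_event plus the variable payload, rounded up by
 * FC_NL_MSGALIGN; the memset() zeroes whatever alignment padding follows
 * the copied payload before the skb is multicast to the
 * SCSI_NL_GRP_FC_EVENTS group. If the netlink socket is unavailable or an
 * allocation fails, the event is dropped with the KERN_WARNING message
 * above.
 */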
  531. /**
  532. * fc_host_post_event - called to post an event on an fc_host.
  533. * @shost: host the event occurred on
  534. * @event_number: fc event number obtained from fc_get_event_number()
  535. * @event_code: fc_host event being posted
  536. * @event_data: 32bits of data for the event being posted
  537. *
  538. * Notes:
  539. * This routine assumes no locks are held on entry.
  540. */
  541. void
  542. fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
  543. enum fc_host_event_code event_code, u32 event_data)
  544. {
  545. fc_host_post_fc_event(shost, event_number, event_code,
  546. (u32)sizeof(u32), (char *)&event_data, 0);
  547. }
  548. EXPORT_SYMBOL(fc_host_post_event);
  549. /**
  550. * fc_host_post_vendor_event - called to post a vendor unique event
  551. * on an fc_host
  552. * @shost: host the event occurred on
  553. * @event_number: fc event number obtained from fc_get_event_number()
  554. * @data_len: amount, in bytes, of vendor unique data
  555. * @data_buf: pointer to vendor unique data
  556. * @vendor_id: Vendor id
  557. *
  558. * Notes:
  559. * This routine assumes no locks are held on entry.
  560. */
  561. void
  562. fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
  563. u32 data_len, char * data_buf, u64 vendor_id)
  564. {
  565. fc_host_post_fc_event(shost, event_number, FCH_EVT_VENDOR_UNIQUE,
  566. data_len, data_buf, vendor_id);
  567. }
  568. EXPORT_SYMBOL(fc_host_post_vendor_event);
  569. /**
  570. * fc_find_rport_by_wwpn - find the fc_rport pointer for a given wwpn
  571. * @shost: host the fc_rport is associated with
  572. * @wwpn: wwpn of the fc_rport device
  573. *
  574. * Notes:
  575. * This routine assumes no locks are held on entry.
  576. */
  577. struct fc_rport *
  578. fc_find_rport_by_wwpn(struct Scsi_Host *shost, u64 wwpn)
  579. {
  580. struct fc_rport *rport;
  581. unsigned long flags;
  582. spin_lock_irqsave(shost->host_lock, flags);
  583. list_for_each_entry(rport, &fc_host_rports(shost), peers) {
  584. if (rport->port_state != FC_PORTSTATE_ONLINE)
  585. continue;
  586. if (rport->port_name == wwpn) {
  587. spin_unlock_irqrestore(shost->host_lock, flags);
  588. return rport;
  589. }
  590. }
  591. spin_unlock_irqrestore(shost->host_lock, flags);
  592. return NULL;
  593. }
  594. EXPORT_SYMBOL(fc_find_rport_by_wwpn);
  595. static void
  596. fc_li_stats_update(u16 event_type,
  597. struct fc_fpin_stats *stats)
  598. {
  599. stats->li++;
  600. switch (event_type) {
  601. case FPIN_LI_UNKNOWN:
  602. stats->li_failure_unknown++;
  603. break;
  604. case FPIN_LI_LINK_FAILURE:
  605. stats->li_link_failure_count++;
  606. break;
  607. case FPIN_LI_LOSS_OF_SYNC:
  608. stats->li_loss_of_sync_count++;
  609. break;
  610. case FPIN_LI_LOSS_OF_SIG:
  611. stats->li_loss_of_signals_count++;
  612. break;
  613. case FPIN_LI_PRIM_SEQ_ERR:
  614. stats->li_prim_seq_err_count++;
  615. break;
  616. case FPIN_LI_INVALID_TX_WD:
  617. stats->li_invalid_tx_word_count++;
  618. break;
  619. case FPIN_LI_INVALID_CRC:
  620. stats->li_invalid_crc_count++;
  621. break;
  622. case FPIN_LI_DEVICE_SPEC:
  623. stats->li_device_specific++;
  624. break;
  625. }
  626. }
  627. static void
  628. fc_delivery_stats_update(u32 reason_code, struct fc_fpin_stats *stats)
  629. {
  630. stats->dn++;
  631. switch (reason_code) {
  632. case FPIN_DELI_UNKNOWN:
  633. stats->dn_unknown++;
  634. break;
  635. case FPIN_DELI_TIMEOUT:
  636. stats->dn_timeout++;
  637. break;
  638. case FPIN_DELI_UNABLE_TO_ROUTE:
  639. stats->dn_unable_to_route++;
  640. break;
  641. case FPIN_DELI_DEVICE_SPEC:
  642. stats->dn_device_specific++;
  643. break;
  644. }
  645. }
  646. static void
  647. fc_cn_stats_update(u16 event_type, struct fc_fpin_stats *stats)
  648. {
  649. stats->cn++;
  650. switch (event_type) {
  651. case FPIN_CONGN_CLEAR:
  652. stats->cn_clear++;
  653. break;
  654. case FPIN_CONGN_LOST_CREDIT:
  655. stats->cn_lost_credit++;
  656. break;
  657. case FPIN_CONGN_CREDIT_STALL:
  658. stats->cn_credit_stall++;
  659. break;
  660. case FPIN_CONGN_OVERSUBSCRIPTION:
  661. stats->cn_oversubscription++;
  662. break;
  663. case FPIN_CONGN_DEVICE_SPEC:
  664. stats->cn_device_specific++;
  665. }
  666. }
  667. /*
  668. * fc_fpin_li_stats_update - routine to update Link Integrity
  669. * event statistics.
  670. * @shost: host the FPIN was received on
  671. * @tlv: pointer to link integrity descriptor
  672. *
  673. */
  674. static void
  675. fc_fpin_li_stats_update(struct Scsi_Host *shost, struct fc_tlv_desc *tlv)
  676. {
  677. u8 i;
  678. struct fc_rport *rport = NULL;
  679. struct fc_rport *attach_rport = NULL;
  680. struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
  681. struct fc_fn_li_desc *li_desc = (struct fc_fn_li_desc *)tlv;
  682. u16 event_type = be16_to_cpu(li_desc->event_type);
  683. u64 wwpn;
  684. rport = fc_find_rport_by_wwpn(shost,
  685. be64_to_cpu(li_desc->attached_wwpn));
  686. if (rport &&
  687. (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
  688. rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
  689. attach_rport = rport;
  690. fc_li_stats_update(event_type, &attach_rport->fpin_stats);
  691. }
  692. if (be32_to_cpu(li_desc->pname_count) > 0) {
  693. for (i = 0;
  694. i < be32_to_cpu(li_desc->pname_count);
  695. i++) {
  696. wwpn = be64_to_cpu(li_desc->pname_list[i]);
  697. rport = fc_find_rport_by_wwpn(shost, wwpn);
  698. if (rport &&
  699. (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
  700. rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
  701. if (rport == attach_rport)
  702. continue;
  703. fc_li_stats_update(event_type,
  704. &rport->fpin_stats);
  705. }
  706. }
  707. }
  708. if (fc_host->port_name == be64_to_cpu(li_desc->attached_wwpn))
  709. fc_li_stats_update(event_type, &fc_host->fpin_stats);
  710. }
  711. /*
  712. * fc_fpin_delivery_stats_update - routine to update Delivery Notification
  713. * event statistics.
  714. * @shost: host the FPIN was received on
  715. * @tlv: pointer to delivery descriptor
  716. *
  717. */
  718. static void
  719. fc_fpin_delivery_stats_update(struct Scsi_Host *shost,
  720. struct fc_tlv_desc *tlv)
  721. {
  722. struct fc_rport *rport = NULL;
  723. struct fc_rport *attach_rport = NULL;
  724. struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
  725. struct fc_fn_deli_desc *dn_desc = (struct fc_fn_deli_desc *)tlv;
  726. u32 reason_code = be32_to_cpu(dn_desc->deli_reason_code);
  727. rport = fc_find_rport_by_wwpn(shost,
  728. be64_to_cpu(dn_desc->attached_wwpn));
  729. if (rport &&
  730. (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
  731. rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
  732. attach_rport = rport;
  733. fc_delivery_stats_update(reason_code,
  734. &attach_rport->fpin_stats);
  735. }
  736. if (fc_host->port_name == be64_to_cpu(dn_desc->attached_wwpn))
  737. fc_delivery_stats_update(reason_code, &fc_host->fpin_stats);
  738. }
  739. /*
  740. * fc_fpin_peer_congn_stats_update - routine to update Peer Congestion
  741. * event statistics.
  742. * @shost: host the FPIN was received on
  743. * @tlv: pointer to peer congestion descriptor
  744. *
  745. */
  746. static void
  747. fc_fpin_peer_congn_stats_update(struct Scsi_Host *shost,
  748. struct fc_tlv_desc *tlv)
  749. {
  750. u8 i;
  751. struct fc_rport *rport = NULL;
  752. struct fc_rport *attach_rport = NULL;
  753. struct fc_fn_peer_congn_desc *pc_desc =
  754. (struct fc_fn_peer_congn_desc *)tlv;
  755. u16 event_type = be16_to_cpu(pc_desc->event_type);
  756. u64 wwpn;
  757. rport = fc_find_rport_by_wwpn(shost,
  758. be64_to_cpu(pc_desc->attached_wwpn));
  759. if (rport &&
  760. (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
  761. rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
  762. attach_rport = rport;
  763. fc_cn_stats_update(event_type, &attach_rport->fpin_stats);
  764. }
  765. if (be32_to_cpu(pc_desc->pname_count) > 0) {
  766. for (i = 0;
  767. i < be32_to_cpu(pc_desc->pname_count);
  768. i++) {
  769. wwpn = be64_to_cpu(pc_desc->pname_list[i]);
  770. rport = fc_find_rport_by_wwpn(shost, wwpn);
  771. if (rport &&
  772. (rport->roles & FC_PORT_ROLE_FCP_TARGET ||
  773. rport->roles & FC_PORT_ROLE_NVME_TARGET)) {
  774. if (rport == attach_rport)
  775. continue;
  776. fc_cn_stats_update(event_type,
  777. &rport->fpin_stats);
  778. }
  779. }
  780. }
  781. }
  782. /*
  783. * fc_fpin_congn_stats_update - routine to update Congestion
  784. * event statistics.
  785. * @shost: host the FPIN was received on
  786. * @tlv: pointer to congestion descriptor
  787. *
  788. */
  789. static void
  790. fc_fpin_congn_stats_update(struct Scsi_Host *shost,
  791. struct fc_tlv_desc *tlv)
  792. {
  793. struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
  794. struct fc_fn_congn_desc *congn = (struct fc_fn_congn_desc *)tlv;
  795. fc_cn_stats_update(be16_to_cpu(congn->event_type),
  796. &fc_host->fpin_stats);
  797. }
  798. /**
  799. * fc_host_fpin_rcv - routine to process a received FPIN.
  800. * @shost: host the FPIN was received on
  801. * @fpin_len: length of FPIN payload, in bytes
  802. * @fpin_buf: pointer to FPIN payload
  803. *
  804. * Notes:
  805. * This routine assumes no locks are held on entry.
  806. */
  807. void
  808. fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf)
  809. {
  810. struct fc_els_fpin *fpin = (struct fc_els_fpin *)fpin_buf;
  811. struct fc_tlv_desc *tlv;
  812. u32 desc_cnt = 0, bytes_remain;
  813. u32 dtag;
  814. /* Update Statistics */
  815. tlv = (struct fc_tlv_desc *)&fpin->fpin_desc[0];
  816. bytes_remain = fpin_len - offsetof(struct fc_els_fpin, fpin_desc);
  817. bytes_remain = min_t(u32, bytes_remain, be32_to_cpu(fpin->desc_len));
  818. while (bytes_remain >= FC_TLV_DESC_HDR_SZ &&
  819. bytes_remain >= FC_TLV_DESC_SZ_FROM_LENGTH(tlv)) {
  820. dtag = be32_to_cpu(tlv->desc_tag);
  821. switch (dtag) {
  822. case ELS_DTAG_LNK_INTEGRITY:
  823. fc_fpin_li_stats_update(shost, tlv);
  824. break;
  825. case ELS_DTAG_DELIVERY:
  826. fc_fpin_delivery_stats_update(shost, tlv);
  827. break;
  828. case ELS_DTAG_PEER_CONGEST:
  829. fc_fpin_peer_congn_stats_update(shost, tlv);
  830. break;
  831. case ELS_DTAG_CONGESTION:
  832. fc_fpin_congn_stats_update(shost, tlv);
  833. }
  834. desc_cnt++;
  835. bytes_remain -= FC_TLV_DESC_SZ_FROM_LENGTH(tlv);
  836. tlv = fc_tlv_next_desc(tlv);
  837. }
  838. fc_host_post_fc_event(shost, fc_get_event_number(),
  839. FCH_EVT_LINK_FPIN, fpin_len, fpin_buf, 0);
  840. }
  841. EXPORT_SYMBOL(fc_host_fpin_rcv);
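/*
 * Usage note (annotation based on the code above): an LLDD passes the raw
 * FPIN ELS payload here; the loop walks each TLV descriptor, updates the
 * matching link-integrity / delivery / congestion counters for the affected
 * rports and the fc_host, and then forwards the whole payload to userspace
 * as an FCH_EVT_LINK_FPIN event.
 */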
  842. static __init int fc_transport_init(void)
  843. {
  844. int error;
  845. atomic_set(&fc_event_seq, 0);
  846. error = transport_class_register(&fc_host_class);
  847. if (error)
  848. return error;
  849. error = transport_class_register(&fc_vport_class);
  850. if (error)
  851. goto unreg_host_class;
  852. error = transport_class_register(&fc_rport_class);
  853. if (error)
  854. goto unreg_vport_class;
  855. error = transport_class_register(&fc_transport_class);
  856. if (error)
  857. goto unreg_rport_class;
  858. return 0;
  859. unreg_rport_class:
  860. transport_class_unregister(&fc_rport_class);
  861. unreg_vport_class:
  862. transport_class_unregister(&fc_vport_class);
  863. unreg_host_class:
  864. transport_class_unregister(&fc_host_class);
  865. return error;
  866. }
  867. static void __exit fc_transport_exit(void)
  868. {
  869. transport_class_unregister(&fc_transport_class);
  870. transport_class_unregister(&fc_rport_class);
  871. transport_class_unregister(&fc_host_class);
  872. transport_class_unregister(&fc_vport_class);
  873. }
  874. /*
  875. * FC Remote Port Attribute Management
  876. */
  877. #define fc_rport_show_function(field, format_string, sz, cast) \
  878. static ssize_t \
  879. show_fc_rport_##field (struct device *dev, \
  880. struct device_attribute *attr, char *buf) \
  881. { \
  882. struct fc_rport *rport = transport_class_to_rport(dev); \
  883. struct Scsi_Host *shost = rport_to_shost(rport); \
  884. struct fc_internal *i = to_fc_internal(shost->transportt); \
  885. if ((i->f->get_rport_##field) && \
  886. !((rport->port_state == FC_PORTSTATE_BLOCKED) || \
  887. (rport->port_state == FC_PORTSTATE_DELETED) || \
  888. (rport->port_state == FC_PORTSTATE_NOTPRESENT))) \
  889. i->f->get_rport_##field(rport); \
  890. return snprintf(buf, sz, format_string, cast rport->field); \
  891. }
  892. #define fc_rport_store_function(field) \
  893. static ssize_t \
  894. store_fc_rport_##field(struct device *dev, \
  895. struct device_attribute *attr, \
  896. const char *buf, size_t count) \
  897. { \
  898. int val; \
  899. struct fc_rport *rport = transport_class_to_rport(dev); \
  900. struct Scsi_Host *shost = rport_to_shost(rport); \
  901. struct fc_internal *i = to_fc_internal(shost->transportt); \
  902. char *cp; \
  903. if ((rport->port_state == FC_PORTSTATE_BLOCKED) || \
  904. (rport->port_state == FC_PORTSTATE_DELETED) || \
  905. (rport->port_state == FC_PORTSTATE_NOTPRESENT)) \
  906. return -EBUSY; \
  907. val = simple_strtoul(buf, &cp, 0); \
  908. if (*cp && (*cp != '\n')) \
  909. return -EINVAL; \
  910. i->f->set_rport_##field(rport, val); \
  911. return count; \
  912. }
  913. #define fc_rport_rd_attr(field, format_string, sz) \
  914. fc_rport_show_function(field, format_string, sz, ) \
  915. static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
  916. show_fc_rport_##field, NULL)
  917. #define fc_rport_rd_attr_cast(field, format_string, sz, cast) \
  918. fc_rport_show_function(field, format_string, sz, (cast)) \
  919. static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
  920. show_fc_rport_##field, NULL)
  921. #define fc_rport_rw_attr(field, format_string, sz) \
  922. fc_rport_show_function(field, format_string, sz, ) \
  923. fc_rport_store_function(field) \
  924. static FC_DEVICE_ATTR(rport, field, S_IRUGO | S_IWUSR, \
  925. show_fc_rport_##field, \
  926. store_fc_rport_##field)
  927. #define fc_private_rport_show_function(field, format_string, sz, cast) \
  928. static ssize_t \
  929. show_fc_rport_##field (struct device *dev, \
  930. struct device_attribute *attr, char *buf) \
  931. { \
  932. struct fc_rport *rport = transport_class_to_rport(dev); \
  933. return snprintf(buf, sz, format_string, cast rport->field); \
  934. }
  935. #define fc_private_rport_rd_attr(field, format_string, sz) \
  936. fc_private_rport_show_function(field, format_string, sz, ) \
  937. static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
  938. show_fc_rport_##field, NULL)
  939. #define fc_private_rport_rd_attr_cast(field, format_string, sz, cast) \
  940. fc_private_rport_show_function(field, format_string, sz, (cast)) \
  941. static FC_DEVICE_ATTR(rport, field, S_IRUGO, \
  942. show_fc_rport_##field, NULL)
  943. #define fc_private_rport_rd_enum_attr(title, maxlen) \
  944. static ssize_t \
  945. show_fc_rport_##title (struct device *dev, \
  946. struct device_attribute *attr, char *buf) \
  947. { \
  948. struct fc_rport *rport = transport_class_to_rport(dev); \
  949. const char *name; \
  950. name = get_fc_##title##_name(rport->title); \
  951. if (!name) \
  952. return -EINVAL; \
  953. return snprintf(buf, maxlen, "%s\n", name); \
  954. } \
  955. static FC_DEVICE_ATTR(rport, title, S_IRUGO, \
  956. show_fc_rport_##title, NULL)
  957. #define SETUP_RPORT_ATTRIBUTE_RD(field) \
  958. i->private_rport_attrs[count] = device_attr_rport_##field; \
  959. i->private_rport_attrs[count].attr.mode = S_IRUGO; \
  960. i->private_rport_attrs[count].store = NULL; \
  961. i->rport_attrs[count] = &i->private_rport_attrs[count]; \
  962. if (i->f->show_rport_##field) \
  963. count++
  964. #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(field) \
  965. i->private_rport_attrs[count] = device_attr_rport_##field; \
  966. i->private_rport_attrs[count].attr.mode = S_IRUGO; \
  967. i->private_rport_attrs[count].store = NULL; \
  968. i->rport_attrs[count] = &i->private_rport_attrs[count]; \
  969. count++
  970. #define SETUP_RPORT_ATTRIBUTE_RW(field) \
  971. i->private_rport_attrs[count] = device_attr_rport_##field; \
  972. if (!i->f->set_rport_##field) { \
  973. i->private_rport_attrs[count].attr.mode = S_IRUGO; \
  974. i->private_rport_attrs[count].store = NULL; \
  975. } \
  976. i->rport_attrs[count] = &i->private_rport_attrs[count]; \
  977. if (i->f->show_rport_##field) \
  978. count++
  979. #define SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(field) \
  980. { \
  981. i->private_rport_attrs[count] = device_attr_rport_##field; \
  982. i->rport_attrs[count] = &i->private_rport_attrs[count]; \
  983. count++; \
  984. }
  985. /* The FC Transport Remote Port Attributes: */
  986. /* Fixed Remote Port Attributes */
  987. fc_private_rport_rd_attr(maxframe_size, "%u bytes\n", 20);
  988. static ssize_t
  989. show_fc_rport_supported_classes (struct device *dev,
  990. struct device_attribute *attr, char *buf)
  991. {
  992. struct fc_rport *rport = transport_class_to_rport(dev);
  993. if (rport->supported_classes == FC_COS_UNSPECIFIED)
  994. return snprintf(buf, 20, "unspecified\n");
  995. return get_fc_cos_names(rport->supported_classes, buf);
  996. }
  997. static FC_DEVICE_ATTR(rport, supported_classes, S_IRUGO,
  998. show_fc_rport_supported_classes, NULL);
  999. /* Dynamic Remote Port Attributes */
  1000. /*
  1001. * dev_loss_tmo attribute
  1002. */
  1003. static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
  1004. {
  1005. char *cp;
  1006. *val = simple_strtoul(buf, &cp, 0);
  1007. if (*cp && (*cp != '\n'))
  1008. return -EINVAL;
  1009. /*
  1010. * Check for overflow; dev_loss_tmo is u32
  1011. */
  1012. if (*val > UINT_MAX)
  1013. return -EINVAL;
  1014. return 0;
  1015. }
  1016. static int fc_rport_set_dev_loss_tmo(struct fc_rport *rport,
  1017. unsigned long val)
  1018. {
  1019. struct Scsi_Host *shost = rport_to_shost(rport);
  1020. struct fc_internal *i = to_fc_internal(shost->transportt);
  1021. if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
  1022. (rport->port_state == FC_PORTSTATE_DELETED) ||
  1023. (rport->port_state == FC_PORTSTATE_NOTPRESENT))
  1024. return -EBUSY;
  1025. /*
  1026. * Check for overflow; dev_loss_tmo is u32
  1027. */
  1028. if (val > UINT_MAX)
  1029. return -EINVAL;
  1030. /*
  1031. * If fast_io_fail is off we have to cap
  1032. * dev_loss_tmo at SCSI_DEVICE_BLOCK_MAX_TIMEOUT
  1033. */
  1034. if (rport->fast_io_fail_tmo == -1 &&
  1035. val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
  1036. return -EINVAL;
  1037. i->f->set_rport_dev_loss_tmo(rport, val);
  1038. return 0;
  1039. }
  1040. fc_rport_show_function(dev_loss_tmo, "%u\n", 20, )
  1041. static ssize_t
  1042. store_fc_rport_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
  1043. const char *buf, size_t count)
  1044. {
  1045. struct fc_rport *rport = transport_class_to_rport(dev);
  1046. unsigned long val;
  1047. int rc;
  1048. rc = fc_str_to_dev_loss(buf, &val);
  1049. if (rc)
  1050. return rc;
  1051. rc = fc_rport_set_dev_loss_tmo(rport, val);
  1052. if (rc)
  1053. return rc;
  1054. return count;
  1055. }
  1056. static FC_DEVICE_ATTR(rport, dev_loss_tmo, S_IRUGO | S_IWUSR,
  1057. show_fc_rport_dev_loss_tmo, store_fc_rport_dev_loss_tmo);
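/*
 * Example (illustrative, rport path assumed): the attribute above typically
 * appears as
 * /sys/class/fc_remote_ports/rport-<host>:<channel>-<id>/dev_loss_tmo,
 * so an administrator can do e.g.
 *
 *   echo 45 > /sys/class/fc_remote_ports/rport-0:0-1/dev_loss_tmo
 *
 * The store path rejects values above SCSI_DEVICE_BLOCK_MAX_TIMEOUT while
 * fast_io_fail_tmo is off, as enforced in fc_rport_set_dev_loss_tmo() above.
 */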
  1058. /* Private Remote Port Attributes */
  1059. fc_private_rport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
  1060. fc_private_rport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
  1061. fc_private_rport_rd_attr(port_id, "0x%06x\n", 20);
  1062. static ssize_t
  1063. show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
  1064. char *buf)
  1065. {
  1066. struct fc_rport *rport = transport_class_to_rport(dev);
  1067. /* identify any roles that are port_id specific */
  1068. if ((rport->port_id != -1) &&
  1069. (rport->port_id & FC_WELLKNOWN_PORTID_MASK) ==
  1070. FC_WELLKNOWN_PORTID_MASK) {
  1071. switch (rport->port_id & FC_WELLKNOWN_ROLE_MASK) {
  1072. case FC_FPORT_PORTID:
  1073. return snprintf(buf, 30, "Fabric Port\n");
  1074. case FC_FABCTLR_PORTID:
  1075. return snprintf(buf, 30, "Fabric Controller\n");
  1076. case FC_DIRSRVR_PORTID:
  1077. return snprintf(buf, 30, "Directory Server\n");
  1078. case FC_TIMESRVR_PORTID:
  1079. return snprintf(buf, 30, "Time Server\n");
  1080. case FC_MGMTSRVR_PORTID:
  1081. return snprintf(buf, 30, "Management Server\n");
  1082. default:
  1083. return snprintf(buf, 30, "Unknown Fabric Entity\n");
  1084. }
  1085. } else {
  1086. if (rport->roles == FC_PORT_ROLE_UNKNOWN)
  1087. return snprintf(buf, 20, "unknown\n");
  1088. return get_fc_port_roles_names(rport->roles, buf);
  1089. }
  1090. }
  1091. static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
  1092. show_fc_rport_roles, NULL);
  1093. static ssize_t fc_rport_set_marginal_state(struct device *dev,
  1094. struct device_attribute *attr,
  1095. const char *buf, size_t count)
  1096. {
  1097. struct fc_rport *rport = transport_class_to_rport(dev);
  1098. enum fc_port_state port_state;
  1099. int ret = 0;
  1100. ret = get_fc_port_state_match(buf, &port_state);
  1101. if (ret)
  1102. return -EINVAL;
  1103. if (port_state == FC_PORTSTATE_MARGINAL) {
  1104. /*
  1105. * Change the state to Marginal only if the
  1106. * current rport state is Online
  1107. * Allow only Online->Marginal
  1108. */
  1109. if (rport->port_state == FC_PORTSTATE_ONLINE)
  1110. rport->port_state = port_state;
  1111. else
  1112. return -EINVAL;
  1113. } else if (port_state == FC_PORTSTATE_ONLINE) {
  1114. /*
  1115. * Change the state to Online only if the
  1116. * current rport state is Marginal
  1117. * Allow only Marginal->Online
  1118. */
  1119. if (rport->port_state == FC_PORTSTATE_MARGINAL)
  1120. rport->port_state = port_state;
  1121. else
  1122. return -EINVAL;
  1123. } else
  1124. return -EINVAL;
  1125. return count;
  1126. }
  1127. static ssize_t
  1128. show_fc_rport_port_state(struct device *dev,
  1129. struct device_attribute *attr, char *buf)
  1130. {
  1131. const char *name;
  1132. struct fc_rport *rport = transport_class_to_rport(dev);
  1133. name = get_fc_port_state_name(rport->port_state);
  1134. if (!name)
  1135. return -EINVAL;
  1136. return snprintf(buf, 20, "%s\n", name);
  1137. }
  1138. static FC_DEVICE_ATTR(rport, port_state, 0444 | 0200,
  1139. show_fc_rport_port_state, fc_rport_set_marginal_state);
  1140. fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
  1141. /*
  1142. * fast_io_fail_tmo attribute
  1143. */
  1144. static ssize_t
  1145. show_fc_rport_fast_io_fail_tmo (struct device *dev,
  1146. struct device_attribute *attr, char *buf)
  1147. {
  1148. struct fc_rport *rport = transport_class_to_rport(dev);
  1149. if (rport->fast_io_fail_tmo == -1)
  1150. return snprintf(buf, 5, "off\n");
  1151. return snprintf(buf, 20, "%d\n", rport->fast_io_fail_tmo);
  1152. }
  1153. static ssize_t
  1154. store_fc_rport_fast_io_fail_tmo(struct device *dev,
  1155. struct device_attribute *attr, const char *buf,
  1156. size_t count)
  1157. {
  1158. int val;
  1159. char *cp;
  1160. struct fc_rport *rport = transport_class_to_rport(dev);
  1161. if ((rport->port_state == FC_PORTSTATE_BLOCKED) ||
  1162. (rport->port_state == FC_PORTSTATE_DELETED) ||
  1163. (rport->port_state == FC_PORTSTATE_NOTPRESENT))
  1164. return -EBUSY;
  1165. if (strncmp(buf, "off", 3) == 0)
  1166. rport->fast_io_fail_tmo = -1;
  1167. else {
  1168. val = simple_strtoul(buf, &cp, 0);
  1169. if ((*cp && (*cp != '\n')) || (val < 0))
  1170. return -EINVAL;
  1171. /*
  1172. * Cap fast_io_fail by dev_loss_tmo or
  1173. * SCSI_DEVICE_BLOCK_MAX_TIMEOUT.
  1174. */
  1175. if ((val >= rport->dev_loss_tmo) ||
  1176. (val > SCSI_DEVICE_BLOCK_MAX_TIMEOUT))
  1177. return -EINVAL;
  1178. rport->fast_io_fail_tmo = val;
  1179. }
  1180. return count;
  1181. }
  1182. static FC_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
  1183. show_fc_rport_fast_io_fail_tmo, store_fc_rport_fast_io_fail_tmo);
  1184. #define fc_rport_fpin_statistic(name) \
  1185. static ssize_t fc_rport_fpinstat_##name(struct device *cd, \
  1186. struct device_attribute *attr, \
  1187. char *buf) \
  1188. { \
  1189. struct fc_rport *rport = transport_class_to_rport(cd); \
  1190. \
  1191. return snprintf(buf, 20, "0x%llx\n", rport->fpin_stats.name); \
  1192. } \
  1193. static FC_DEVICE_ATTR(rport, fpin_##name, 0444, fc_rport_fpinstat_##name, NULL)
  1194. fc_rport_fpin_statistic(dn);
  1195. fc_rport_fpin_statistic(dn_unknown);
  1196. fc_rport_fpin_statistic(dn_timeout);
  1197. fc_rport_fpin_statistic(dn_unable_to_route);
  1198. fc_rport_fpin_statistic(dn_device_specific);
  1199. fc_rport_fpin_statistic(cn);
  1200. fc_rport_fpin_statistic(cn_clear);
  1201. fc_rport_fpin_statistic(cn_lost_credit);
  1202. fc_rport_fpin_statistic(cn_credit_stall);
  1203. fc_rport_fpin_statistic(cn_oversubscription);
  1204. fc_rport_fpin_statistic(cn_device_specific);
  1205. fc_rport_fpin_statistic(li);
  1206. fc_rport_fpin_statistic(li_failure_unknown);
  1207. fc_rport_fpin_statistic(li_link_failure_count);
  1208. fc_rport_fpin_statistic(li_loss_of_sync_count);
  1209. fc_rport_fpin_statistic(li_loss_of_signals_count);
  1210. fc_rport_fpin_statistic(li_prim_seq_err_count);
  1211. fc_rport_fpin_statistic(li_invalid_tx_word_count);
  1212. fc_rport_fpin_statistic(li_invalid_crc_count);
  1213. fc_rport_fpin_statistic(li_device_specific);
  1214. static struct attribute *fc_rport_statistics_attrs[] = {
  1215. &device_attr_rport_fpin_dn.attr,
  1216. &device_attr_rport_fpin_dn_unknown.attr,
  1217. &device_attr_rport_fpin_dn_timeout.attr,
  1218. &device_attr_rport_fpin_dn_unable_to_route.attr,
  1219. &device_attr_rport_fpin_dn_device_specific.attr,
  1220. &device_attr_rport_fpin_li.attr,
  1221. &device_attr_rport_fpin_li_failure_unknown.attr,
  1222. &device_attr_rport_fpin_li_link_failure_count.attr,
  1223. &device_attr_rport_fpin_li_loss_of_sync_count.attr,
  1224. &device_attr_rport_fpin_li_loss_of_signals_count.attr,
  1225. &device_attr_rport_fpin_li_prim_seq_err_count.attr,
  1226. &device_attr_rport_fpin_li_invalid_tx_word_count.attr,
  1227. &device_attr_rport_fpin_li_invalid_crc_count.attr,
  1228. &device_attr_rport_fpin_li_device_specific.attr,
  1229. &device_attr_rport_fpin_cn.attr,
  1230. &device_attr_rport_fpin_cn_clear.attr,
  1231. &device_attr_rport_fpin_cn_lost_credit.attr,
  1232. &device_attr_rport_fpin_cn_credit_stall.attr,
  1233. &device_attr_rport_fpin_cn_oversubscription.attr,
  1234. &device_attr_rport_fpin_cn_device_specific.attr,
  1235. NULL
  1236. };
  1237. static struct attribute_group fc_rport_statistics_group = {
  1238. .name = "statistics",
  1239. .attrs = fc_rport_statistics_attrs,
  1240. };
  1241. /*
  1242. * FC SCSI Target Attribute Management
  1243. */
  1244. /*
  1245. * Note: in the target show function we recognize when the remote
  1246. * port is in the hierarchy and do not allow the driver to get
  1247. * involved in sysfs functions. The driver only gets involved if
  1248. * it's the "old" style that doesn't use rports.
  1249. */
  1250. #define fc_starget_show_function(field, format_string, sz, cast) \
  1251. static ssize_t \
  1252. show_fc_starget_##field (struct device *dev, \
  1253. struct device_attribute *attr, char *buf) \
  1254. { \
  1255. struct scsi_target *starget = transport_class_to_starget(dev); \
  1256. struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \
  1257. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1258. struct fc_rport *rport = starget_to_rport(starget); \
  1259. if (rport) \
  1260. fc_starget_##field(starget) = rport->field; \
  1261. else if (i->f->get_starget_##field) \
  1262. i->f->get_starget_##field(starget); \
  1263. return snprintf(buf, sz, format_string, \
  1264. cast fc_starget_##field(starget)); \
  1265. }
  1266. #define fc_starget_rd_attr(field, format_string, sz) \
  1267. fc_starget_show_function(field, format_string, sz, ) \
  1268. static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
  1269. show_fc_starget_##field, NULL)
  1270. #define fc_starget_rd_attr_cast(field, format_string, sz, cast) \
  1271. fc_starget_show_function(field, format_string, sz, (cast)) \
  1272. static FC_DEVICE_ATTR(starget, field, S_IRUGO, \
  1273. show_fc_starget_##field, NULL)
  1274. #define SETUP_STARGET_ATTRIBUTE_RD(field) \
  1275. i->private_starget_attrs[count] = device_attr_starget_##field; \
  1276. i->private_starget_attrs[count].attr.mode = S_IRUGO; \
  1277. i->private_starget_attrs[count].store = NULL; \
  1278. i->starget_attrs[count] = &i->private_starget_attrs[count]; \
  1279. if (i->f->show_starget_##field) \
  1280. count++
  1281. #define SETUP_STARGET_ATTRIBUTE_RW(field) \
  1282. i->private_starget_attrs[count] = device_attr_starget_##field; \
  1283. if (!i->f->set_starget_##field) { \
  1284. i->private_starget_attrs[count].attr.mode = S_IRUGO; \
  1285. i->private_starget_attrs[count].store = NULL; \
  1286. } \
  1287. i->starget_attrs[count] = &i->private_starget_attrs[count]; \
  1288. if (i->f->show_starget_##field) \
  1289. count++
  1290. /* The FC Transport SCSI Target Attributes: */
  1291. fc_starget_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
  1292. fc_starget_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
  1293. fc_starget_rd_attr(port_id, "0x%06x\n", 20);
  1294. /*
  1295. * FC Virtual Port Attribute Management
  1296. */
  1297. #define fc_vport_show_function(field, format_string, sz, cast) \
  1298. static ssize_t \
  1299. show_fc_vport_##field (struct device *dev, \
  1300. struct device_attribute *attr, char *buf) \
  1301. { \
  1302. struct fc_vport *vport = transport_class_to_vport(dev); \
  1303. struct Scsi_Host *shost = vport_to_shost(vport); \
  1304. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1305. if ((i->f->get_vport_##field) && \
  1306. !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
  1307. i->f->get_vport_##field(vport); \
  1308. return snprintf(buf, sz, format_string, cast vport->field); \
  1309. }
  1310. #define fc_vport_store_function(field) \
  1311. static ssize_t \
  1312. store_fc_vport_##field(struct device *dev, \
  1313. struct device_attribute *attr, \
  1314. const char *buf, size_t count) \
  1315. { \
  1316. int val; \
  1317. struct fc_vport *vport = transport_class_to_vport(dev); \
  1318. struct Scsi_Host *shost = vport_to_shost(vport); \
  1319. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1320. char *cp; \
  1321. if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
  1322. return -EBUSY; \
  1323. val = simple_strtoul(buf, &cp, 0); \
  1324. if (*cp && (*cp != '\n')) \
  1325. return -EINVAL; \
  1326. i->f->set_vport_##field(vport, val); \
  1327. return count; \
  1328. }
  1329. #define fc_vport_store_str_function(field, slen) \
  1330. static ssize_t \
  1331. store_fc_vport_##field(struct device *dev, \
  1332. struct device_attribute *attr, \
  1333. const char *buf, size_t count) \
  1334. { \
  1335. struct fc_vport *vport = transport_class_to_vport(dev); \
  1336. struct Scsi_Host *shost = vport_to_shost(vport); \
  1337. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1338. unsigned int cnt=count; \
  1339. \
  1340. /* count may include a LF at end of string */ \
  1341. if (buf[cnt-1] == '\n') \
  1342. cnt--; \
  1343. if (cnt > ((slen) - 1)) \
  1344. return -EINVAL; \
  1345. memcpy(vport->field, buf, cnt); \
  1346. i->f->set_vport_##field(vport); \
  1347. return count; \
  1348. }
  1349. #define fc_vport_rd_attr(field, format_string, sz) \
  1350. fc_vport_show_function(field, format_string, sz, ) \
  1351. static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
  1352. show_fc_vport_##field, NULL)
  1353. #define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
  1354. fc_vport_show_function(field, format_string, sz, (cast)) \
  1355. static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
  1356. show_fc_vport_##field, NULL)
  1357. #define fc_vport_rw_attr(field, format_string, sz) \
  1358. fc_vport_show_function(field, format_string, sz, ) \
  1359. fc_vport_store_function(field) \
  1360. static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
  1361. show_fc_vport_##field, \
  1362. store_fc_vport_##field)
  1363. #define fc_private_vport_show_function(field, format_string, sz, cast) \
  1364. static ssize_t \
  1365. show_fc_vport_##field (struct device *dev, \
  1366. struct device_attribute *attr, char *buf) \
  1367. { \
  1368. struct fc_vport *vport = transport_class_to_vport(dev); \
  1369. return snprintf(buf, sz, format_string, cast vport->field); \
  1370. }
  1371. #define fc_private_vport_store_u32_function(field) \
  1372. static ssize_t \
  1373. store_fc_vport_##field(struct device *dev, \
  1374. struct device_attribute *attr, \
  1375. const char *buf, size_t count) \
  1376. { \
  1377. u32 val; \
  1378. struct fc_vport *vport = transport_class_to_vport(dev); \
  1379. char *cp; \
  1380. if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
  1381. return -EBUSY; \
  1382. val = simple_strtoul(buf, &cp, 0); \
  1383. if (*cp && (*cp != '\n')) \
  1384. return -EINVAL; \
  1385. vport->field = val; \
  1386. return count; \
  1387. }
  1388. #define fc_private_vport_rd_attr(field, format_string, sz) \
  1389. fc_private_vport_show_function(field, format_string, sz, ) \
  1390. static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
  1391. show_fc_vport_##field, NULL)
  1392. #define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
  1393. fc_private_vport_show_function(field, format_string, sz, (cast)) \
  1394. static FC_DEVICE_ATTR(vport, field, S_IRUGO, \
  1395. show_fc_vport_##field, NULL)
  1396. #define fc_private_vport_rw_u32_attr(field, format_string, sz) \
  1397. fc_private_vport_show_function(field, format_string, sz, ) \
  1398. fc_private_vport_store_u32_function(field) \
  1399. static FC_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
  1400. show_fc_vport_##field, \
  1401. store_fc_vport_##field)
  1402. #define fc_private_vport_rd_enum_attr(title, maxlen) \
  1403. static ssize_t \
  1404. show_fc_vport_##title (struct device *dev, \
  1405. struct device_attribute *attr, \
  1406. char *buf) \
  1407. { \
  1408. struct fc_vport *vport = transport_class_to_vport(dev); \
  1409. const char *name; \
  1410. name = get_fc_##title##_name(vport->title); \
  1411. if (!name) \
  1412. return -EINVAL; \
  1413. return snprintf(buf, maxlen, "%s\n", name); \
  1414. } \
  1415. static FC_DEVICE_ATTR(vport, title, S_IRUGO, \
  1416. show_fc_vport_##title, NULL)
  1417. #define SETUP_VPORT_ATTRIBUTE_RD(field) \
  1418. i->private_vport_attrs[count] = device_attr_vport_##field; \
  1419. i->private_vport_attrs[count].attr.mode = S_IRUGO; \
  1420. i->private_vport_attrs[count].store = NULL; \
  1421. i->vport_attrs[count] = &i->private_vport_attrs[count]; \
  1422. if (i->f->get_##field) \
  1423. count++
  1424. /* NOTE: Above MACRO differs: checks function not show bit */
  1425. #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
  1426. i->private_vport_attrs[count] = device_attr_vport_##field; \
  1427. i->private_vport_attrs[count].attr.mode = S_IRUGO; \
  1428. i->private_vport_attrs[count].store = NULL; \
  1429. i->vport_attrs[count] = &i->private_vport_attrs[count]; \
  1430. count++
  1431. #define SETUP_VPORT_ATTRIBUTE_WR(field) \
  1432. i->private_vport_attrs[count] = device_attr_vport_##field; \
  1433. i->vport_attrs[count] = &i->private_vport_attrs[count]; \
  1434. if (i->f->field) \
  1435. count++
  1436. /* NOTE: Above MACRO differs: checks function */
  1437. #define SETUP_VPORT_ATTRIBUTE_RW(field) \
  1438. i->private_vport_attrs[count] = device_attr_vport_##field; \
  1439. if (!i->f->set_vport_##field) { \
  1440. i->private_vport_attrs[count].attr.mode = S_IRUGO; \
  1441. i->private_vport_attrs[count].store = NULL; \
  1442. } \
  1443. i->vport_attrs[count] = &i->private_vport_attrs[count]; \
  1444. count++
  1445. /* NOTE: Above MACRO differs: does not check show bit */
  1446. #define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
  1447. { \
  1448. i->private_vport_attrs[count] = device_attr_vport_##field; \
  1449. i->vport_attrs[count] = &i->private_vport_attrs[count]; \
  1450. count++; \
  1451. }
  1452. /* The FC Transport Virtual Port Attributes: */
  1453. /* Fixed Virtual Port Attributes */
  1454. /* Dynamic Virtual Port Attributes */
  1455. /* Private Virtual Port Attributes */
  1456. fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
  1457. fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
  1458. fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
  1459. fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
  1460. static ssize_t
  1461. show_fc_vport_roles (struct device *dev, struct device_attribute *attr,
  1462. char *buf)
  1463. {
  1464. struct fc_vport *vport = transport_class_to_vport(dev);
  1465. if (vport->roles == FC_PORT_ROLE_UNKNOWN)
  1466. return snprintf(buf, 20, "unknown\n");
  1467. return get_fc_port_roles_names(vport->roles, buf);
  1468. }
  1469. static FC_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
  1470. fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
  1471. fc_private_vport_show_function(symbolic_name, "%s\n",
  1472. FC_VPORT_SYMBOLIC_NAMELEN + 1, )
  1473. fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
  1474. static FC_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
  1475. show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
  1476. static ssize_t
  1477. store_fc_vport_delete(struct device *dev, struct device_attribute *attr,
  1478. const char *buf, size_t count)
  1479. {
  1480. struct fc_vport *vport = transport_class_to_vport(dev);
  1481. struct Scsi_Host *shost = vport_to_shost(vport);
  1482. unsigned long flags;
  1483. spin_lock_irqsave(shost->host_lock, flags);
  1484. if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
  1485. spin_unlock_irqrestore(shost->host_lock, flags);
  1486. return -EBUSY;
  1487. }
  1488. vport->flags |= FC_VPORT_DELETING;
  1489. spin_unlock_irqrestore(shost->host_lock, flags);
  1490. fc_queue_work(shost, &vport->vport_delete_work);
  1491. return count;
  1492. }
  1493. static FC_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
  1494. NULL, store_fc_vport_delete);
  1495. /*
  1496. * Enable/Disable vport
  1497. * Write "1" to disable, write "0" to enable
  1498. */
  1499. static ssize_t
  1500. store_fc_vport_disable(struct device *dev, struct device_attribute *attr,
  1501. const char *buf,
  1502. size_t count)
  1503. {
  1504. struct fc_vport *vport = transport_class_to_vport(dev);
  1505. struct Scsi_Host *shost = vport_to_shost(vport);
  1506. struct fc_internal *i = to_fc_internal(shost->transportt);
  1507. int stat;
  1508. if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
  1509. return -EBUSY;
  1510. if (*buf == '0') {
  1511. if (vport->vport_state != FC_VPORT_DISABLED)
  1512. return -EALREADY;
  1513. } else if (*buf == '1') {
  1514. if (vport->vport_state == FC_VPORT_DISABLED)
  1515. return -EALREADY;
  1516. } else
  1517. return -EINVAL;
  1518. stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
  1519. return stat ? stat : count;
  1520. }
  1521. static FC_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
  1522. NULL, store_fc_vport_disable);
  1523. /*
  1524. * Host Attribute Management
  1525. */
  1526. #define fc_host_show_function(field, format_string, sz, cast) \
  1527. static ssize_t \
  1528. show_fc_host_##field (struct device *dev, \
  1529. struct device_attribute *attr, char *buf) \
  1530. { \
  1531. struct Scsi_Host *shost = transport_class_to_shost(dev); \
  1532. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1533. if (i->f->get_host_##field) \
  1534. i->f->get_host_##field(shost); \
  1535. return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
  1536. }
  1537. #define fc_host_store_function(field) \
  1538. static ssize_t \
  1539. store_fc_host_##field(struct device *dev, \
  1540. struct device_attribute *attr, \
  1541. const char *buf, size_t count) \
  1542. { \
  1543. int val; \
  1544. struct Scsi_Host *shost = transport_class_to_shost(dev); \
  1545. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1546. char *cp; \
  1547. \
  1548. val = simple_strtoul(buf, &cp, 0); \
  1549. if (*cp && (*cp != '\n')) \
  1550. return -EINVAL; \
  1551. i->f->set_host_##field(shost, val); \
  1552. return count; \
  1553. }
  1554. #define fc_host_store_str_function(field, slen) \
  1555. static ssize_t \
  1556. store_fc_host_##field(struct device *dev, \
  1557. struct device_attribute *attr, \
  1558. const char *buf, size_t count) \
  1559. { \
  1560. struct Scsi_Host *shost = transport_class_to_shost(dev); \
  1561. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1562. unsigned int cnt=count; \
  1563. \
  1564. /* count may include a LF at end of string */ \
  1565. if (buf[cnt-1] == '\n') \
  1566. cnt--; \
  1567. if (cnt > ((slen) - 1)) \
  1568. return -EINVAL; \
  1569. memcpy(fc_host_##field(shost), buf, cnt); \
  1570. i->f->set_host_##field(shost); \
  1571. return count; \
  1572. }
  1573. #define fc_host_rd_attr(field, format_string, sz) \
  1574. fc_host_show_function(field, format_string, sz, ) \
  1575. static FC_DEVICE_ATTR(host, field, S_IRUGO, \
  1576. show_fc_host_##field, NULL)
  1577. #define fc_host_rd_attr_cast(field, format_string, sz, cast) \
  1578. fc_host_show_function(field, format_string, sz, (cast)) \
  1579. static FC_DEVICE_ATTR(host, field, S_IRUGO, \
  1580. show_fc_host_##field, NULL)
  1581. #define fc_host_rw_attr(field, format_string, sz) \
  1582. fc_host_show_function(field, format_string, sz, ) \
  1583. fc_host_store_function(field) \
  1584. static FC_DEVICE_ATTR(host, field, S_IRUGO | S_IWUSR, \
  1585. show_fc_host_##field, \
  1586. store_fc_host_##field)
  1587. #define fc_host_rd_enum_attr(title, maxlen) \
  1588. static ssize_t \
  1589. show_fc_host_##title (struct device *dev, \
  1590. struct device_attribute *attr, char *buf) \
  1591. { \
  1592. struct Scsi_Host *shost = transport_class_to_shost(dev); \
  1593. struct fc_internal *i = to_fc_internal(shost->transportt); \
  1594. const char *name; \
  1595. if (i->f->get_host_##title) \
  1596. i->f->get_host_##title(shost); \
  1597. name = get_fc_##title##_name(fc_host_##title(shost)); \
  1598. if (!name) \
  1599. return -EINVAL; \
  1600. return snprintf(buf, maxlen, "%s\n", name); \
  1601. } \
  1602. static FC_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
  1603. #define SETUP_HOST_ATTRIBUTE_RD(field) \
  1604. i->private_host_attrs[count] = device_attr_host_##field; \
  1605. i->private_host_attrs[count].attr.mode = S_IRUGO; \
  1606. i->private_host_attrs[count].store = NULL; \
  1607. i->host_attrs[count] = &i->private_host_attrs[count]; \
  1608. if (i->f->show_host_##field) \
  1609. count++
  1610. #define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
  1611. i->private_host_attrs[count] = device_attr_host_##field; \
  1612. i->private_host_attrs[count].attr.mode = S_IRUGO; \
  1613. i->private_host_attrs[count].store = NULL; \
  1614. i->host_attrs[count] = &i->private_host_attrs[count]; \
  1615. count++
  1616. #define SETUP_HOST_ATTRIBUTE_RW(field) \
  1617. i->private_host_attrs[count] = device_attr_host_##field; \
  1618. if (!i->f->set_host_##field) { \
  1619. i->private_host_attrs[count].attr.mode = S_IRUGO; \
  1620. i->private_host_attrs[count].store = NULL; \
  1621. } \
  1622. i->host_attrs[count] = &i->private_host_attrs[count]; \
  1623. if (i->f->show_host_##field) \
  1624. count++
  1625. #define fc_private_host_show_function(field, format_string, sz, cast) \
  1626. static ssize_t \
  1627. show_fc_host_##field (struct device *dev, \
  1628. struct device_attribute *attr, char *buf) \
  1629. { \
  1630. struct Scsi_Host *shost = transport_class_to_shost(dev); \
  1631. return snprintf(buf, sz, format_string, cast fc_host_##field(shost)); \
  1632. }
  1633. #define fc_private_host_rd_attr(field, format_string, sz) \
  1634. fc_private_host_show_function(field, format_string, sz, ) \
  1635. static FC_DEVICE_ATTR(host, field, S_IRUGO, \
  1636. show_fc_host_##field, NULL)
  1637. #define fc_private_host_rd_attr_cast(field, format_string, sz, cast) \
  1638. fc_private_host_show_function(field, format_string, sz, (cast)) \
  1639. static FC_DEVICE_ATTR(host, field, S_IRUGO, \
  1640. show_fc_host_##field, NULL)
  1641. #define SETUP_PRIVATE_HOST_ATTRIBUTE_RD(field) \
  1642. i->private_host_attrs[count] = device_attr_host_##field; \
  1643. i->private_host_attrs[count].attr.mode = S_IRUGO; \
  1644. i->private_host_attrs[count].store = NULL; \
  1645. i->host_attrs[count] = &i->private_host_attrs[count]; \
  1646. count++
  1647. #define SETUP_PRIVATE_HOST_ATTRIBUTE_RW(field) \
  1648. { \
  1649. i->private_host_attrs[count] = device_attr_host_##field; \
  1650. i->host_attrs[count] = &i->private_host_attrs[count]; \
  1651. count++; \
  1652. }
  1653. /* Fixed Host Attributes */
  1654. static ssize_t
  1655. show_fc_host_supported_classes (struct device *dev,
  1656. struct device_attribute *attr, char *buf)
  1657. {
  1658. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1659. if (fc_host_supported_classes(shost) == FC_COS_UNSPECIFIED)
  1660. return snprintf(buf, 20, "unspecified\n");
  1661. return get_fc_cos_names(fc_host_supported_classes(shost), buf);
  1662. }
  1663. static FC_DEVICE_ATTR(host, supported_classes, S_IRUGO,
  1664. show_fc_host_supported_classes, NULL);
  1665. static ssize_t
  1666. show_fc_host_supported_fc4s (struct device *dev,
  1667. struct device_attribute *attr, char *buf)
  1668. {
  1669. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1670. return (ssize_t)show_fc_fc4s(buf, fc_host_supported_fc4s(shost));
  1671. }
  1672. static FC_DEVICE_ATTR(host, supported_fc4s, S_IRUGO,
  1673. show_fc_host_supported_fc4s, NULL);
  1674. static ssize_t
  1675. show_fc_host_supported_speeds (struct device *dev,
  1676. struct device_attribute *attr, char *buf)
  1677. {
  1678. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1679. if (fc_host_supported_speeds(shost) == FC_PORTSPEED_UNKNOWN)
  1680. return snprintf(buf, 20, "unknown\n");
  1681. return get_fc_port_speed_names(fc_host_supported_speeds(shost), buf);
  1682. }
  1683. static FC_DEVICE_ATTR(host, supported_speeds, S_IRUGO,
  1684. show_fc_host_supported_speeds, NULL);
  1685. fc_private_host_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
  1686. fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
  1687. fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
  1688. unsigned long long);
  1689. fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
  1690. fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
  1691. fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
  1692. fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
  1693. fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
  1694. fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
  1695. fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
  1696. fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
  1697. fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
  1698. fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
  1699. /* Dynamic Host Attributes */
  1700. static ssize_t
  1701. show_fc_host_active_fc4s (struct device *dev,
  1702. struct device_attribute *attr, char *buf)
  1703. {
  1704. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1705. struct fc_internal *i = to_fc_internal(shost->transportt);
  1706. if (i->f->get_host_active_fc4s)
  1707. i->f->get_host_active_fc4s(shost);
  1708. return (ssize_t)show_fc_fc4s(buf, fc_host_active_fc4s(shost));
  1709. }
  1710. static FC_DEVICE_ATTR(host, active_fc4s, S_IRUGO,
  1711. show_fc_host_active_fc4s, NULL);
  1712. static ssize_t
  1713. show_fc_host_speed (struct device *dev,
  1714. struct device_attribute *attr, char *buf)
  1715. {
  1716. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1717. struct fc_internal *i = to_fc_internal(shost->transportt);
  1718. if (i->f->get_host_speed)
  1719. i->f->get_host_speed(shost);
  1720. if (fc_host_speed(shost) == FC_PORTSPEED_UNKNOWN)
  1721. return snprintf(buf, 20, "unknown\n");
  1722. return get_fc_port_speed_names(fc_host_speed(shost), buf);
  1723. }
  1724. static FC_DEVICE_ATTR(host, speed, S_IRUGO,
  1725. show_fc_host_speed, NULL);
  1726. fc_host_rd_attr(port_id, "0x%06x\n", 20);
  1727. fc_host_rd_enum_attr(port_type, FC_PORTTYPE_MAX_NAMELEN);
  1728. fc_host_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
  1729. fc_host_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
  1730. fc_host_rd_attr(symbolic_name, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
  1731. fc_private_host_show_function(system_hostname, "%s\n",
  1732. FC_SYMBOLIC_NAME_SIZE + 1, )
  1733. fc_host_store_str_function(system_hostname, FC_SYMBOLIC_NAME_SIZE)
  1734. static FC_DEVICE_ATTR(host, system_hostname, S_IRUGO | S_IWUSR,
  1735. show_fc_host_system_hostname, store_fc_host_system_hostname);
  1736. /* Private Host Attributes */
  1737. static ssize_t
  1738. show_fc_private_host_tgtid_bind_type(struct device *dev,
  1739. struct device_attribute *attr, char *buf)
  1740. {
  1741. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1742. const char *name;
  1743. name = get_fc_tgtid_bind_type_name(fc_host_tgtid_bind_type(shost));
  1744. if (!name)
  1745. return -EINVAL;
  1746. return snprintf(buf, FC_BINDTYPE_MAX_NAMELEN, "%s\n", name);
  1747. }
  1748. #define get_list_head_entry(pos, head, member) \
  1749. pos = list_entry((head)->next, typeof(*pos), member)
  1750. static ssize_t
  1751. store_fc_private_host_tgtid_bind_type(struct device *dev,
  1752. struct device_attribute *attr, const char *buf, size_t count)
  1753. {
  1754. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1755. struct fc_rport *rport;
  1756. enum fc_tgtid_binding_type val;
  1757. unsigned long flags;
  1758. if (get_fc_tgtid_bind_type_match(buf, &val))
  1759. return -EINVAL;
  1760. /* if changing bind type, purge all unused consistent bindings */
  1761. if (val != fc_host_tgtid_bind_type(shost)) {
  1762. spin_lock_irqsave(shost->host_lock, flags);
  1763. while (!list_empty(&fc_host_rport_bindings(shost))) {
  1764. get_list_head_entry(rport,
  1765. &fc_host_rport_bindings(shost), peers);
  1766. list_del(&rport->peers);
  1767. rport->port_state = FC_PORTSTATE_DELETED;
  1768. fc_queue_work(shost, &rport->rport_delete_work);
  1769. }
  1770. spin_unlock_irqrestore(shost->host_lock, flags);
  1771. }
  1772. fc_host_tgtid_bind_type(shost) = val;
  1773. return count;
  1774. }
  1775. static FC_DEVICE_ATTR(host, tgtid_bind_type, S_IRUGO | S_IWUSR,
  1776. show_fc_private_host_tgtid_bind_type,
  1777. store_fc_private_host_tgtid_bind_type);
  1778. static ssize_t
  1779. store_fc_private_host_issue_lip(struct device *dev,
  1780. struct device_attribute *attr, const char *buf, size_t count)
  1781. {
  1782. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1783. struct fc_internal *i = to_fc_internal(shost->transportt);
  1784. int ret;
  1785. /* ignore any data value written to the attribute */
  1786. if (i->f->issue_fc_host_lip) {
  1787. ret = i->f->issue_fc_host_lip(shost);
  1788. return ret ? ret: count;
  1789. }
  1790. return -ENOENT;
  1791. }
  1792. static FC_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
  1793. store_fc_private_host_issue_lip);
  1794. static ssize_t
  1795. store_fc_private_host_dev_loss_tmo(struct device *dev,
  1796. struct device_attribute *attr,
  1797. const char *buf, size_t count)
  1798. {
  1799. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1800. struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
  1801. struct fc_rport *rport;
  1802. unsigned long val, flags;
  1803. int rc;
  1804. rc = fc_str_to_dev_loss(buf, &val);
  1805. if (rc)
  1806. return rc;
  1807. fc_host_dev_loss_tmo(shost) = val;
  1808. spin_lock_irqsave(shost->host_lock, flags);
  1809. list_for_each_entry(rport, &fc_host->rports, peers)
  1810. fc_rport_set_dev_loss_tmo(rport, val);
  1811. spin_unlock_irqrestore(shost->host_lock, flags);
  1812. return count;
  1813. }
  1814. fc_private_host_show_function(dev_loss_tmo, "%d\n", 20, );
  1815. static FC_DEVICE_ATTR(host, dev_loss_tmo, S_IRUGO | S_IWUSR,
  1816. show_fc_host_dev_loss_tmo,
  1817. store_fc_private_host_dev_loss_tmo);
  1818. fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
  1819. /*
  1820. * Host Statistics Management
  1821. */
  1822. /* Show a given attribute in the statistics group */
  1823. static ssize_t
  1824. fc_stat_show(const struct device *dev, char *buf, unsigned long offset)
  1825. {
  1826. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1827. struct fc_internal *i = to_fc_internal(shost->transportt);
  1828. struct fc_host_statistics *stats;
  1829. ssize_t ret = -ENOENT;
  1830. if (offset > sizeof(struct fc_host_statistics) ||
  1831. offset % sizeof(u64) != 0)
  1832. WARN_ON(1);
  1833. if (i->f->get_fc_host_stats) {
  1834. stats = (i->f->get_fc_host_stats)(shost);
  1835. if (stats)
  1836. ret = snprintf(buf, 20, "0x%llx\n",
  1837. (unsigned long long)*(u64 *)(((u8 *) stats) + offset));
  1838. }
  1839. return ret;
  1840. }
  1841. /* generate a read-only statistics attribute */
  1842. #define fc_host_statistic(name) \
  1843. static ssize_t show_fcstat_##name(struct device *cd, \
  1844. struct device_attribute *attr, \
  1845. char *buf) \
  1846. { \
  1847. return fc_stat_show(cd, buf, \
  1848. offsetof(struct fc_host_statistics, name)); \
  1849. } \
  1850. static FC_DEVICE_ATTR(host, name, S_IRUGO, show_fcstat_##name, NULL)
  1851. fc_host_statistic(seconds_since_last_reset);
  1852. fc_host_statistic(tx_frames);
  1853. fc_host_statistic(tx_words);
  1854. fc_host_statistic(rx_frames);
  1855. fc_host_statistic(rx_words);
  1856. fc_host_statistic(lip_count);
  1857. fc_host_statistic(nos_count);
  1858. fc_host_statistic(error_frames);
  1859. fc_host_statistic(dumped_frames);
  1860. fc_host_statistic(link_failure_count);
  1861. fc_host_statistic(loss_of_sync_count);
  1862. fc_host_statistic(loss_of_signal_count);
  1863. fc_host_statistic(prim_seq_protocol_err_count);
  1864. fc_host_statistic(invalid_tx_word_count);
  1865. fc_host_statistic(invalid_crc_count);
  1866. fc_host_statistic(fcp_input_requests);
  1867. fc_host_statistic(fcp_output_requests);
  1868. fc_host_statistic(fcp_control_requests);
  1869. fc_host_statistic(fcp_input_megabytes);
  1870. fc_host_statistic(fcp_output_megabytes);
  1871. fc_host_statistic(fcp_packet_alloc_failures);
  1872. fc_host_statistic(fcp_packet_aborts);
  1873. fc_host_statistic(fcp_frame_alloc_failures);
  1874. fc_host_statistic(fc_no_free_exch);
  1875. fc_host_statistic(fc_no_free_exch_xid);
  1876. fc_host_statistic(fc_xid_not_found);
  1877. fc_host_statistic(fc_xid_busy);
  1878. fc_host_statistic(fc_seq_not_found);
  1879. fc_host_statistic(fc_non_bls_resp);
  1880. fc_host_statistic(cn_sig_warn);
  1881. fc_host_statistic(cn_sig_alarm);
  1882. #define fc_host_fpin_statistic(name) \
  1883. static ssize_t fc_host_fpinstat_##name(struct device *cd, \
  1884. struct device_attribute *attr, \
  1885. char *buf) \
  1886. { \
  1887. struct Scsi_Host *shost = transport_class_to_shost(cd); \
  1888. struct fc_host_attrs *fc_host = shost_to_fc_host(shost); \
  1889. \
  1890. return snprintf(buf, 20, "0x%llx\n", fc_host->fpin_stats.name); \
  1891. } \
  1892. static FC_DEVICE_ATTR(host, fpin_##name, 0444, fc_host_fpinstat_##name, NULL)
  1893. fc_host_fpin_statistic(dn);
  1894. fc_host_fpin_statistic(dn_unknown);
  1895. fc_host_fpin_statistic(dn_timeout);
  1896. fc_host_fpin_statistic(dn_unable_to_route);
  1897. fc_host_fpin_statistic(dn_device_specific);
  1898. fc_host_fpin_statistic(cn);
  1899. fc_host_fpin_statistic(cn_clear);
  1900. fc_host_fpin_statistic(cn_lost_credit);
  1901. fc_host_fpin_statistic(cn_credit_stall);
  1902. fc_host_fpin_statistic(cn_oversubscription);
  1903. fc_host_fpin_statistic(cn_device_specific);
  1904. fc_host_fpin_statistic(li);
  1905. fc_host_fpin_statistic(li_failure_unknown);
  1906. fc_host_fpin_statistic(li_link_failure_count);
  1907. fc_host_fpin_statistic(li_loss_of_sync_count);
  1908. fc_host_fpin_statistic(li_loss_of_signals_count);
  1909. fc_host_fpin_statistic(li_prim_seq_err_count);
  1910. fc_host_fpin_statistic(li_invalid_tx_word_count);
  1911. fc_host_fpin_statistic(li_invalid_crc_count);
  1912. fc_host_fpin_statistic(li_device_specific);
  1913. static ssize_t
  1914. fc_reset_statistics(struct device *dev, struct device_attribute *attr,
  1915. const char *buf, size_t count)
  1916. {
  1917. struct Scsi_Host *shost = transport_class_to_shost(dev);
  1918. struct fc_internal *i = to_fc_internal(shost->transportt);
  1919. /* ignore any data value written to the attribute */
  1920. if (i->f->reset_fc_host_stats) {
  1921. i->f->reset_fc_host_stats(shost);
  1922. return count;
  1923. }
  1924. return -ENOENT;
  1925. }
  1926. static FC_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
  1927. fc_reset_statistics);
  1928. static struct attribute *fc_statistics_attrs[] = {
  1929. &device_attr_host_seconds_since_last_reset.attr,
  1930. &device_attr_host_tx_frames.attr,
  1931. &device_attr_host_tx_words.attr,
  1932. &device_attr_host_rx_frames.attr,
  1933. &device_attr_host_rx_words.attr,
  1934. &device_attr_host_lip_count.attr,
  1935. &device_attr_host_nos_count.attr,
  1936. &device_attr_host_error_frames.attr,
  1937. &device_attr_host_dumped_frames.attr,
  1938. &device_attr_host_link_failure_count.attr,
  1939. &device_attr_host_loss_of_sync_count.attr,
  1940. &device_attr_host_loss_of_signal_count.attr,
  1941. &device_attr_host_prim_seq_protocol_err_count.attr,
  1942. &device_attr_host_invalid_tx_word_count.attr,
  1943. &device_attr_host_invalid_crc_count.attr,
  1944. &device_attr_host_fcp_input_requests.attr,
  1945. &device_attr_host_fcp_output_requests.attr,
  1946. &device_attr_host_fcp_control_requests.attr,
  1947. &device_attr_host_fcp_input_megabytes.attr,
  1948. &device_attr_host_fcp_output_megabytes.attr,
  1949. &device_attr_host_fcp_packet_alloc_failures.attr,
  1950. &device_attr_host_fcp_packet_aborts.attr,
  1951. &device_attr_host_fcp_frame_alloc_failures.attr,
  1952. &device_attr_host_fc_no_free_exch.attr,
  1953. &device_attr_host_fc_no_free_exch_xid.attr,
  1954. &device_attr_host_fc_xid_not_found.attr,
  1955. &device_attr_host_fc_xid_busy.attr,
  1956. &device_attr_host_fc_seq_not_found.attr,
  1957. &device_attr_host_fc_non_bls_resp.attr,
  1958. &device_attr_host_cn_sig_warn.attr,
  1959. &device_attr_host_cn_sig_alarm.attr,
  1960. &device_attr_host_reset_statistics.attr,
  1961. &device_attr_host_fpin_dn.attr,
  1962. &device_attr_host_fpin_dn_unknown.attr,
  1963. &device_attr_host_fpin_dn_timeout.attr,
  1964. &device_attr_host_fpin_dn_unable_to_route.attr,
  1965. &device_attr_host_fpin_dn_device_specific.attr,
  1966. &device_attr_host_fpin_li.attr,
  1967. &device_attr_host_fpin_li_failure_unknown.attr,
  1968. &device_attr_host_fpin_li_link_failure_count.attr,
  1969. &device_attr_host_fpin_li_loss_of_sync_count.attr,
  1970. &device_attr_host_fpin_li_loss_of_signals_count.attr,
  1971. &device_attr_host_fpin_li_prim_seq_err_count.attr,
  1972. &device_attr_host_fpin_li_invalid_tx_word_count.attr,
  1973. &device_attr_host_fpin_li_invalid_crc_count.attr,
  1974. &device_attr_host_fpin_li_device_specific.attr,
  1975. &device_attr_host_fpin_cn.attr,
  1976. &device_attr_host_fpin_cn_clear.attr,
  1977. &device_attr_host_fpin_cn_lost_credit.attr,
  1978. &device_attr_host_fpin_cn_credit_stall.attr,
  1979. &device_attr_host_fpin_cn_oversubscription.attr,
  1980. &device_attr_host_fpin_cn_device_specific.attr,
  1981. NULL
  1982. };
  1983. static struct attribute_group fc_statistics_group = {
  1984. .name = "statistics",
  1985. .attrs = fc_statistics_attrs,
  1986. };
  1987. /* Host Vport Attributes */
  1988. static int
  1989. fc_parse_wwn(const char *ns, u64 *nm)
  1990. {
  1991. unsigned int i, j;
  1992. u8 wwn[8];
  1993. memset(wwn, 0, sizeof(wwn));
  1994. /* Validate and store the new name */
  1995. for (i=0, j=0; i < 16; i++) {
  1996. int value;
  1997. value = hex_to_bin(*ns++);
  1998. if (value >= 0)
  1999. j = (j << 4) | value;
  2000. else
  2001. return -EINVAL;
  2002. if (i % 2) {
  2003. wwn[i/2] = j & 0xff;
  2004. j = 0;
  2005. }
  2006. }
  2007. *nm = wwn_to_u64(wwn);
  2008. return 0;
  2009. }
  2010. /*
  2011. * "Short-cut" sysfs variable to create a new vport on a FC Host.
  2012. * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
  2013. * will default to a NPIV-based FCP_Initiator; The WWNs are specified
  2014. * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
  2015. */
  2016. static ssize_t
  2017. store_fc_host_vport_create(struct device *dev, struct device_attribute *attr,
  2018. const char *buf, size_t count)
  2019. {
  2020. struct Scsi_Host *shost = transport_class_to_shost(dev);
  2021. struct fc_vport_identifiers vid;
  2022. struct fc_vport *vport;
  2023. unsigned int cnt=count;
  2024. int stat;
  2025. memset(&vid, 0, sizeof(vid));
  2026. /* count may include a LF at end of string */
  2027. if (buf[cnt-1] == '\n')
  2028. cnt--;
  2029. /* validate we have enough characters for WWPN */
  2030. if ((cnt != (16+1+16)) || (buf[16] != ':'))
  2031. return -EINVAL;
  2032. stat = fc_parse_wwn(&buf[0], &vid.port_name);
  2033. if (stat)
  2034. return stat;
  2035. stat = fc_parse_wwn(&buf[17], &vid.node_name);
  2036. if (stat)
  2037. return stat;
  2038. vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
  2039. vid.vport_type = FC_PORTTYPE_NPIV;
  2040. /* vid.symbolic_name is already zero/NULL's */
  2041. vid.disable = false; /* always enabled */
  2042. /* we only allow support on Channel 0 !!! */
  2043. stat = fc_vport_setup(shost, 0, &shost->shost_gendev, &vid, &vport);
  2044. return stat ? stat : count;
  2045. }
  2046. static FC_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
  2047. store_fc_host_vport_create);
  2048. /*
  2049. * "Short-cut" sysfs variable to delete a vport on a FC Host.
  2050. * Vport is identified by a string containing "<WWPN>:<WWNN>".
  2051. * The WWNs are specified as hex characters, and may *not* contain
  2052. * any prefixes (e.g. 0x, x, etc)
  2053. */
  2054. static ssize_t
  2055. store_fc_host_vport_delete(struct device *dev, struct device_attribute *attr,
  2056. const char *buf, size_t count)
  2057. {
  2058. struct Scsi_Host *shost = transport_class_to_shost(dev);
  2059. struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
  2060. struct fc_vport *vport;
  2061. u64 wwpn, wwnn;
  2062. unsigned long flags;
  2063. unsigned int cnt=count;
  2064. int stat, match;
  2065. /* count may include a LF at end of string */
  2066. if (buf[cnt-1] == '\n')
  2067. cnt--;
  2068. /* validate we have enough characters for WWPN */
  2069. if ((cnt != (16+1+16)) || (buf[16] != ':'))
  2070. return -EINVAL;
  2071. stat = fc_parse_wwn(&buf[0], &wwpn);
  2072. if (stat)
  2073. return stat;
  2074. stat = fc_parse_wwn(&buf[17], &wwnn);
  2075. if (stat)
  2076. return stat;
  2077. spin_lock_irqsave(shost->host_lock, flags);
  2078. match = 0;
  2079. /* we only allow support on Channel 0 !!! */
  2080. list_for_each_entry(vport, &fc_host->vports, peers) {
  2081. if ((vport->channel == 0) &&
  2082. (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
  2083. if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
  2084. break;
  2085. vport->flags |= FC_VPORT_DELETING;
  2086. match = 1;
  2087. break;
  2088. }
  2089. }
  2090. spin_unlock_irqrestore(shost->host_lock, flags);
  2091. if (!match)
  2092. return -ENODEV;
  2093. stat = fc_vport_terminate(vport);
  2094. return stat ? stat : count;
  2095. }
  2096. static FC_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
  2097. store_fc_host_vport_delete);
  2098. static int fc_host_match(struct attribute_container *cont,
  2099. struct device *dev)
  2100. {
  2101. struct Scsi_Host *shost;
  2102. struct fc_internal *i;
  2103. if (!scsi_is_host_device(dev))
  2104. return 0;
  2105. shost = dev_to_shost(dev);
  2106. if (!shost->transportt || shost->transportt->host_attrs.ac.class
  2107. != &fc_host_class.class)
  2108. return 0;
  2109. i = to_fc_internal(shost->transportt);
  2110. return &i->t.host_attrs.ac == cont;
  2111. }
  2112. static int fc_target_match(struct attribute_container *cont,
  2113. struct device *dev)
  2114. {
  2115. struct Scsi_Host *shost;
  2116. struct fc_internal *i;
  2117. if (!scsi_is_target_device(dev))
  2118. return 0;
  2119. shost = dev_to_shost(dev->parent);
  2120. if (!shost->transportt || shost->transportt->host_attrs.ac.class
  2121. != &fc_host_class.class)
  2122. return 0;
  2123. i = to_fc_internal(shost->transportt);
  2124. return &i->t.target_attrs.ac == cont;
  2125. }
  2126. static void fc_rport_dev_release(struct device *dev)
  2127. {
  2128. struct fc_rport *rport = dev_to_rport(dev);
  2129. put_device(dev->parent);
  2130. kfree(rport);
  2131. }
  2132. int scsi_is_fc_rport(const struct device *dev)
  2133. {
  2134. return dev->release == fc_rport_dev_release;
  2135. }
  2136. EXPORT_SYMBOL(scsi_is_fc_rport);
  2137. static int fc_rport_match(struct attribute_container *cont,
  2138. struct device *dev)
  2139. {
  2140. struct Scsi_Host *shost;
  2141. struct fc_internal *i;
  2142. if (!scsi_is_fc_rport(dev))
  2143. return 0;
  2144. shost = dev_to_shost(dev->parent);
  2145. if (!shost->transportt || shost->transportt->host_attrs.ac.class
  2146. != &fc_host_class.class)
  2147. return 0;
  2148. i = to_fc_internal(shost->transportt);
  2149. return &i->rport_attr_cont.ac == cont;
  2150. }
  2151. static void fc_vport_dev_release(struct device *dev)
  2152. {
  2153. struct fc_vport *vport = dev_to_vport(dev);
  2154. put_device(dev->parent); /* release kobj parent */
  2155. kfree(vport);
  2156. }
  2157. static int scsi_is_fc_vport(const struct device *dev)
  2158. {
  2159. return dev->release == fc_vport_dev_release;
  2160. }
  2161. static int fc_vport_match(struct attribute_container *cont,
  2162. struct device *dev)
  2163. {
  2164. struct fc_vport *vport;
  2165. struct Scsi_Host *shost;
  2166. struct fc_internal *i;
  2167. if (!scsi_is_fc_vport(dev))
  2168. return 0;
  2169. vport = dev_to_vport(dev);
  2170. shost = vport_to_shost(vport);
  2171. if (!shost->transportt || shost->transportt->host_attrs.ac.class
  2172. != &fc_host_class.class)
  2173. return 0;
  2174. i = to_fc_internal(shost->transportt);
  2175. return &i->vport_attr_cont.ac == cont;
  2176. }
  2177. /**
  2178. * fc_eh_timed_out - FC Transport I/O timeout intercept handler
  2179. * @scmd: The SCSI command which timed out
  2180. *
  2181. * This routine protects against error handlers getting invoked while a
  2182. * rport is in a blocked state, typically due to a temporarily loss of
  2183. * connectivity. If the error handlers are allowed to proceed, requests
  2184. * to abort i/o, reset the target, etc will likely fail as there is no way
  2185. * to communicate with the device to perform the requested function. These
  2186. * failures may result in the midlayer taking the device offline, requiring
  2187. * manual intervention to restore operation.
  2188. *
  2189. * This routine, called whenever an i/o times out, validates the state of
  2190. * the underlying rport. If the rport is blocked, it returns
  2191. * EH_RESET_TIMER, which will continue to reschedule the timeout.
  2192. * Eventually, either the device will return, or devloss_tmo will fire,
  2193. * and when the timeout then fires, it will be handled normally.
  2194. * If the rport is not blocked, normal error handling continues.
  2195. *
  2196. * Notes:
  2197. * This routine assumes no locks are held on entry.
  2198. */
  2199. enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd)
  2200. {
  2201. struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
  2202. if (rport->port_state == FC_PORTSTATE_BLOCKED)
  2203. return SCSI_EH_RESET_TIMER;
  2204. return SCSI_EH_NOT_HANDLED;
  2205. }
  2206. EXPORT_SYMBOL(fc_eh_timed_out);
  2207. /*
  2208. * Called by fc_user_scan to locate an rport on the shost that
  2209. * matches the channel and target id, and invoke scsi_scan_target()
  2210. * on the rport.
  2211. */
  2212. static void
  2213. fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
  2214. {
  2215. struct fc_rport *rport;
  2216. unsigned long flags;
  2217. spin_lock_irqsave(shost->host_lock, flags);
  2218. list_for_each_entry(rport, &fc_host_rports(shost), peers) {
  2219. if (rport->scsi_target_id == -1)
  2220. continue;
  2221. if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
  2222. (rport->port_state != FC_PORTSTATE_MARGINAL))
  2223. continue;
  2224. if ((channel == rport->channel) &&
  2225. (id == rport->scsi_target_id)) {
  2226. spin_unlock_irqrestore(shost->host_lock, flags);
  2227. scsi_scan_target(&rport->dev, channel, id, lun,
  2228. SCSI_SCAN_MANUAL);
  2229. return;
  2230. }
  2231. }
  2232. spin_unlock_irqrestore(shost->host_lock, flags);
  2233. }
  2234. /*
  2235. * Called via sysfs scan routines. Necessary, as the FC transport
  2236. * wants to place all target objects below the rport object. So this
  2237. * routine must invoke the scsi_scan_target() routine with the rport
  2238. * object as the parent.
  2239. */
  2240. static int
  2241. fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
  2242. {
  2243. uint chlo, chhi;
  2244. uint tgtlo, tgthi;
  2245. if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
  2246. ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
  2247. ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
  2248. return -EINVAL;
  2249. if (channel == SCAN_WILD_CARD) {
  2250. chlo = 0;
  2251. chhi = shost->max_channel + 1;
  2252. } else {
  2253. chlo = channel;
  2254. chhi = channel + 1;
  2255. }
  2256. if (id == SCAN_WILD_CARD) {
  2257. tgtlo = 0;
  2258. tgthi = shost->max_id;
  2259. } else {
  2260. tgtlo = id;
  2261. tgthi = id + 1;
  2262. }
  2263. for ( ; chlo < chhi; chlo++)
  2264. for ( ; tgtlo < tgthi; tgtlo++)
  2265. fc_user_scan_tgt(shost, chlo, tgtlo, lun);
  2266. return 0;
  2267. }
  2268. struct scsi_transport_template *
  2269. fc_attach_transport(struct fc_function_template *ft)
  2270. {
  2271. int count;
  2272. struct fc_internal *i = kzalloc(sizeof(struct fc_internal),
  2273. GFP_KERNEL);
  2274. if (unlikely(!i))
  2275. return NULL;
  2276. i->t.target_attrs.ac.attrs = &i->starget_attrs[0];
  2277. i->t.target_attrs.ac.class = &fc_transport_class.class;
  2278. i->t.target_attrs.ac.match = fc_target_match;
  2279. i->t.target_size = sizeof(struct fc_starget_attrs);
  2280. transport_container_register(&i->t.target_attrs);
  2281. i->t.host_attrs.ac.attrs = &i->host_attrs[0];
  2282. i->t.host_attrs.ac.class = &fc_host_class.class;
  2283. i->t.host_attrs.ac.match = fc_host_match;
  2284. i->t.host_size = sizeof(struct fc_host_attrs);
  2285. if (ft->get_fc_host_stats)
  2286. i->t.host_attrs.statistics = &fc_statistics_group;
  2287. transport_container_register(&i->t.host_attrs);
  2288. i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
  2289. i->rport_attr_cont.ac.class = &fc_rport_class.class;
  2290. i->rport_attr_cont.ac.match = fc_rport_match;
  2291. i->rport_attr_cont.statistics = &fc_rport_statistics_group;
  2292. transport_container_register(&i->rport_attr_cont);
  2293. i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
  2294. i->vport_attr_cont.ac.class = &fc_vport_class.class;
  2295. i->vport_attr_cont.ac.match = fc_vport_match;
  2296. transport_container_register(&i->vport_attr_cont);
  2297. i->f = ft;
  2298. /* Transport uses the shost workq for scsi scanning */
  2299. i->t.create_work_queue = 1;
  2300. i->t.user_scan = fc_user_scan;
  2301. /*
  2302. * Setup SCSI Target Attributes.
  2303. */
  2304. count = 0;
  2305. SETUP_STARGET_ATTRIBUTE_RD(node_name);
  2306. SETUP_STARGET_ATTRIBUTE_RD(port_name);
  2307. SETUP_STARGET_ATTRIBUTE_RD(port_id);
  2308. BUG_ON(count > FC_STARGET_NUM_ATTRS);
  2309. i->starget_attrs[count] = NULL;
  2310. /*
  2311. * Setup SCSI Host Attributes.
  2312. */
  2313. count=0;
  2314. SETUP_HOST_ATTRIBUTE_RD(node_name);
  2315. SETUP_HOST_ATTRIBUTE_RD(port_name);
  2316. SETUP_HOST_ATTRIBUTE_RD(permanent_port_name);
  2317. SETUP_HOST_ATTRIBUTE_RD(supported_classes);
  2318. SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
  2319. SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
  2320. SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
  2321. if (ft->vport_create) {
  2322. SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
  2323. SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
  2324. }
  2325. SETUP_HOST_ATTRIBUTE_RD(serial_number);
  2326. SETUP_HOST_ATTRIBUTE_RD(manufacturer);
  2327. SETUP_HOST_ATTRIBUTE_RD(model);
  2328. SETUP_HOST_ATTRIBUTE_RD(model_description);
  2329. SETUP_HOST_ATTRIBUTE_RD(hardware_version);
  2330. SETUP_HOST_ATTRIBUTE_RD(driver_version);
  2331. SETUP_HOST_ATTRIBUTE_RD(firmware_version);
  2332. SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
  2333. SETUP_HOST_ATTRIBUTE_RD(port_id);
  2334. SETUP_HOST_ATTRIBUTE_RD(port_type);
  2335. SETUP_HOST_ATTRIBUTE_RD(port_state);
  2336. SETUP_HOST_ATTRIBUTE_RD(active_fc4s);
  2337. SETUP_HOST_ATTRIBUTE_RD(speed);
  2338. SETUP_HOST_ATTRIBUTE_RD(fabric_name);
  2339. SETUP_HOST_ATTRIBUTE_RD(symbolic_name);
  2340. SETUP_HOST_ATTRIBUTE_RW(system_hostname);
  2341. /* Transport-managed attributes */
  2342. SETUP_PRIVATE_HOST_ATTRIBUTE_RW(dev_loss_tmo);
  2343. SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
  2344. if (ft->issue_fc_host_lip)
  2345. SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
  2346. if (ft->vport_create)
  2347. SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
  2348. if (ft->vport_delete)
  2349. SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
  2350. BUG_ON(count > FC_HOST_NUM_ATTRS);
  2351. i->host_attrs[count] = NULL;
  2352. /*
  2353. * Setup Remote Port Attributes.
  2354. */
  2355. count=0;
  2356. SETUP_RPORT_ATTRIBUTE_RD(maxframe_size);
  2357. SETUP_RPORT_ATTRIBUTE_RD(supported_classes);
  2358. SETUP_RPORT_ATTRIBUTE_RW(dev_loss_tmo);
  2359. SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(node_name);
  2360. SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
  2361. SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
  2362. SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
  2363. SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(port_state);
  2364. SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
  2365. SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
  2366. BUG_ON(count > FC_RPORT_NUM_ATTRS);
  2367. i->rport_attrs[count] = NULL;
  2368. /*
  2369. * Setup Virtual Port Attributes.
  2370. */
  2371. count=0;
  2372. SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
  2373. SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
  2374. SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
  2375. SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
  2376. SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
  2377. SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
  2378. SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
  2379. SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
  2380. SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
  2381. BUG_ON(count > FC_VPORT_NUM_ATTRS);
  2382. i->vport_attrs[count] = NULL;
  2383. return &i->t;
  2384. }
  2385. EXPORT_SYMBOL(fc_attach_transport);

void fc_release_transport(struct scsi_transport_template *t)
{
	struct fc_internal *i = to_fc_internal(t);

	transport_container_unregister(&i->t.target_attrs);
	transport_container_unregister(&i->t.host_attrs);
	transport_container_unregister(&i->rport_attr_cont);
	transport_container_unregister(&i->vport_attr_cont);

	kfree(i);
}
EXPORT_SYMBOL(fc_release_transport);

/**
 * fc_queue_work - Queue work to the fc_host workqueue.
 * @shost:	Pointer to Scsi_Host bound to fc_host.
 * @work:	Work to queue for execution.
 *
 * Return value:
 *	1 - work queued for execution
 *	0 - work is already queued
 *	-EINVAL - work queue doesn't exist
 */
static int
fc_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
	if (unlikely(!fc_host_work_q(shost))) {
		printk(KERN_ERR
			"ERROR: FC host '%s' attempted to queue work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();

		return -EINVAL;
	}

	return queue_work(fc_host_work_q(shost), work);
}

/**
 * fc_flush_work - Flush a fc_host's workqueue.
 * @shost:	Pointer to Scsi_Host bound to fc_host.
 */
static void
fc_flush_work(struct Scsi_Host *shost)
{
	if (!fc_host_work_q(shost)) {
		printk(KERN_ERR
			"ERROR: FC host '%s' attempted to flush work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();
		return;
	}

	flush_workqueue(fc_host_work_q(shost));
}

/**
 * fc_queue_devloss_work - Schedule work for the fc_host devloss workqueue.
 * @shost:	Pointer to Scsi_Host bound to fc_host.
 * @work:	Work to queue for execution.
 * @delay:	jiffies to delay the work queuing
 *
 * Return value:
 *	1 on success / 0 already queued / < 0 for error
 */
static int
fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work,
		      unsigned long delay)
{
	if (unlikely(!fc_host_devloss_work_q(shost))) {
		printk(KERN_ERR
			"ERROR: FC host '%s' attempted to queue work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();

		return -EINVAL;
	}

	return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay);
}

/**
 * fc_flush_devloss - Flush a fc_host's devloss workqueue.
 * @shost:	Pointer to Scsi_Host bound to fc_host.
 */
static void
fc_flush_devloss(struct Scsi_Host *shost)
{
	if (!fc_host_devloss_work_q(shost)) {
		printk(KERN_ERR
			"ERROR: FC host '%s' attempted to flush work, "
			"when no workqueue created.\n", shost->hostt->name);
		dump_stack();
		return;
	}

	flush_workqueue(fc_host_devloss_work_q(shost));
}

/**
 * fc_remove_host - called to terminate any fc_transport-related elements for a scsi host.
 * @shost:	Which &Scsi_Host
 *
 * This routine is expected to be called immediately preceding a driver's
 * call to scsi_remove_host().
 *
 * WARNING: A driver utilizing the fc_transport which fails to call
 *   this routine prior to scsi_remove_host() will leave dangling
 *   objects in /sys/class/fc_remote_ports. Access to any of these
 *   objects can result in a system crash !!!
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_remove_host(struct Scsi_Host *shost)
{
	struct fc_vport *vport = NULL, *next_vport = NULL;
	struct fc_rport *rport = NULL, *next_rport = NULL;
	struct workqueue_struct *work_q;
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);

	/* Remove any vports */
	list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
		vport->flags |= FC_VPORT_DELETING;
		fc_queue_work(shost, &vport->vport_delete_work);
	}

	/* Remove any remote ports */
	list_for_each_entry_safe(rport, next_rport,
			&fc_host->rports, peers) {
		list_del(&rport->peers);
		rport->port_state = FC_PORTSTATE_DELETED;
		fc_queue_work(shost, &rport->rport_delete_work);
	}

	list_for_each_entry_safe(rport, next_rport,
			&fc_host->rport_bindings, peers) {
		list_del(&rport->peers);
		rport->port_state = FC_PORTSTATE_DELETED;
		fc_queue_work(shost, &rport->rport_delete_work);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);

	/* flush all scan work items */
	scsi_flush_work(shost);

	/* flush all stgt delete, and rport delete work items, then kill it */
	if (fc_host->work_q) {
		work_q = fc_host->work_q;
		fc_host->work_q = NULL;
		destroy_workqueue(work_q);
	}

	/* flush all devloss work items, then kill it */
	if (fc_host->devloss_work_q) {
		work_q = fc_host->devloss_work_q;
		fc_host->devloss_work_q = NULL;
		destroy_workqueue(work_q);
	}
}
EXPORT_SYMBOL(fc_remove_host);
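
/*
 * Illustrative sketch (not part of the original source): a typical LLDD
 * teardown path calls fc_remove_host() immediately before
 * scsi_remove_host(), e.g. from its PCI ->remove() handler. The
 * my_lld_* naming below is hypothetical.
 *
 *	static void my_lld_pci_remove(struct pci_dev *pdev)
 *	{
 *		struct Scsi_Host *shost = pci_get_drvdata(pdev);
 *
 *		fc_remove_host(shost);		// tear down rports/vports first
 *		scsi_remove_host(shost);	// then detach the scsi host
 *		scsi_host_put(shost);		// drop the driver's reference
 *	}
 */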

static void fc_terminate_rport_io(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_internal *i = to_fc_internal(shost->transportt);

	/* Involve the LLDD if possible to terminate all io on the rport. */
	if (i->f->terminate_rport_io)
		i->f->terminate_rport_io(rport);

	/*
	 * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
	 */
	scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
}

/**
 * fc_starget_delete - called to delete the scsi descendants of an rport
 * @work:	remote port to be operated on.
 *
 * Deletes target and all sdevs.
 */
static void
fc_starget_delete(struct work_struct *work)
{
	struct fc_rport *rport =
		container_of(work, struct fc_rport, stgt_delete_work);

	fc_terminate_rport_io(rport);
	scsi_remove_target(&rport->dev);
}

/**
 * fc_rport_final_delete - finish rport termination and delete it.
 * @work:	remote port to be deleted.
 */
static void
fc_rport_final_delete(struct work_struct *work)
{
	struct fc_rport *rport =
		container_of(work, struct fc_rport, rport_delete_work);
	struct device *dev = &rport->dev;
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_internal *i = to_fc_internal(shost->transportt);
	unsigned long flags;
	int do_callback = 0;

	fc_terminate_rport_io(rport);

	/*
	 * if a scan is pending, flush the SCSI Host work_q so that
	 * we can reclaim the rport scan work element.
	 */
	if (rport->flags & FC_RPORT_SCAN_PENDING)
		scsi_flush_work(shost);

	/*
	 * Cancel any outstanding timers. These should really exist
	 * only when rmmod'ing the LLDD and we're asking for
	 * immediate termination of the rports
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (rport->flags & FC_RPORT_DEVLOSS_PENDING) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		if (!cancel_delayed_work(&rport->fail_io_work))
			fc_flush_devloss(shost);
		if (!cancel_delayed_work(&rport->dev_loss_work))
			fc_flush_devloss(shost);
		cancel_work_sync(&rport->scan_work);
		spin_lock_irqsave(shost->host_lock, flags);
		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* Delete SCSI target and sdevs */
	if (rport->scsi_target_id != -1)
		fc_starget_delete(&rport->stgt_delete_work);

	/*
	 * Notify the driver that the rport is now dead. The LLDD will
	 * also guarantee that any communication to the rport is terminated
	 *
	 * Avoid this call if we already called it when we preserved the
	 * rport for the binding.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (!(rport->flags & FC_RPORT_DEVLOSS_CALLBK_DONE) &&
	    (i->f->dev_loss_tmo_callbk)) {
		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
		do_callback = 1;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (do_callback)
		i->f->dev_loss_tmo_callbk(rport);

	fc_bsg_remove(rport->rqst_q);

	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);
	scsi_host_put(shost);			/* for fc_host->rport list */
	put_device(dev);			/* for self-reference */
}

/**
 * fc_remote_port_create - allocates and creates a remote FC port.
 * @shost:	scsi host the remote port is connected to.
 * @channel:	Channel on shost port connected to.
 * @ids:	The world wide names, fc address, and FC4 port
 *		roles for the remote port.
 *
 * Allocates and creates the remote port structure, including the
 * class and sysfs creation.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
static struct fc_rport *
fc_remote_port_create(struct Scsi_Host *shost, int channel,
		      struct fc_rport_identifiers *ids)
{
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	struct fc_internal *fci = to_fc_internal(shost->transportt);
	struct fc_rport *rport;
	struct device *dev;
	unsigned long flags;
	int error;
	size_t size;

	size = (sizeof(struct fc_rport) + fci->f->dd_fcrport_size);
	rport = kzalloc(size, GFP_KERNEL);
	if (unlikely(!rport)) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}

	rport->maxframe_size = -1;
	rport->supported_classes = FC_COS_UNSPECIFIED;
	rport->dev_loss_tmo = fc_host->dev_loss_tmo;
	memcpy(&rport->node_name, &ids->node_name, sizeof(rport->node_name));
	memcpy(&rport->port_name, &ids->port_name, sizeof(rport->port_name));
	rport->port_id = ids->port_id;
	rport->roles = ids->roles;
	rport->port_state = FC_PORTSTATE_ONLINE;
	if (fci->f->dd_fcrport_size)
		rport->dd_data = &rport[1];
	rport->channel = channel;
	rport->fast_io_fail_tmo = -1;

	INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport);
	INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io);
	INIT_WORK(&rport->scan_work, fc_scsi_scan_rport);
	INIT_WORK(&rport->stgt_delete_work, fc_starget_delete);
	INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete);

	spin_lock_irqsave(shost->host_lock, flags);

	rport->number = fc_host->next_rport_number++;
	if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) ||
	    (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR))
		rport->scsi_target_id = fc_host->next_target_id++;
	else
		rport->scsi_target_id = -1;
	list_add_tail(&rport->peers, &fc_host->rports);
	scsi_host_get(shost);			/* for fc_host->rport list */

	spin_unlock_irqrestore(shost->host_lock, flags);

	dev = &rport->dev;
	device_initialize(dev);			/* takes self reference */
	dev->parent = get_device(&shost->shost_gendev); /* parent reference */
	dev->release = fc_rport_dev_release;
	dev_set_name(dev, "rport-%d:%d-%d",
		     shost->host_no, channel, rport->number);
	transport_setup_device(dev);

	error = device_add(dev);
	if (error) {
		printk(KERN_ERR "FC Remote Port device_add failed\n");
		goto delete_rport;
	}
	transport_add_device(dev);
	transport_configure_device(dev);

	fc_bsg_rportadd(shost, rport);
	/* ignore any bsg add error - we just can't do sgio */

	if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
		/* initiate a scan of the target */
		rport->flags |= FC_RPORT_SCAN_PENDING;
		scsi_queue_work(shost, &rport->scan_work);
	}

	return rport;

delete_rport:
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	list_del(&rport->peers);
	scsi_host_put(shost);			/* for fc_host->rport list */
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev->parent);
	kfree(rport);
	return NULL;
}

/**
 * fc_remote_port_add - notify fc transport of the existence of a remote FC port.
 * @shost:	scsi host the remote port is connected to.
 * @channel:	Channel on shost port connected to.
 * @ids:	The world wide names, fc address, and FC4 port
 *		roles for the remote port.
 *
 * The LLDD calls this routine to notify the transport of the existence
 * of a remote port. The LLDD provides the unique identifiers (wwpn, wwnn)
 * of the port, its FC address (port_id), and the FC4 roles that are
 * active for the port.
 *
 * For ports that are FCP targets (aka scsi targets), the FC transport
 * maintains consistent target id bindings on behalf of the LLDD.
 * A consistent target id binding is an assignment of a target id to
 * a remote port identifier, which persists while the scsi host is
 * attached. The remote port can disappear, then later reappear, and
 * its target id assignment remains the same. This allows for shifts
 * in FC addressing (if binding by wwpn or wwnn) with no apparent
 * changes to the scsi subsystem which is based on scsi host number and
 * target id values. Bindings are only valid during the attachment of
 * the scsi host. If the host detaches, then later re-attaches, target
 * id bindings may change.
 *
 * This routine is responsible for returning a remote port structure.
 * The routine will search the list of remote ports it maintains
 * internally on behalf of consistent target id mappings. If found, the
 * remote port structure will be reused. Otherwise, a new remote port
 * structure will be allocated.
 *
 * Whenever a remote port is allocated, a new fc_remote_port class
 * device is created.
 *
 * Should not be called from interrupt context.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
struct fc_rport *
fc_remote_port_add(struct Scsi_Host *shost, int channel,
	struct fc_rport_identifiers *ids)
{
	struct fc_internal *fci = to_fc_internal(shost->transportt);
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	struct fc_rport *rport;
	unsigned long flags;
	int match = 0;

	/* ensure any stgt delete functions are done */
	fc_flush_work(shost);

	/*
	 * Search the list of "active" rports, for an rport that has been
	 * deleted, but we've held off the real delete while the target
	 * is in a "blocked" state.
	 */
	spin_lock_irqsave(shost->host_lock, flags);

	list_for_each_entry(rport, &fc_host->rports, peers) {

		if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
		     rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
		    (rport->channel == channel)) {

			switch (fc_host->tgtid_bind_type) {
			case FC_TGTID_BIND_BY_WWPN:
			case FC_TGTID_BIND_NONE:
				if (rport->port_name == ids->port_name)
					match = 1;
				break;
			case FC_TGTID_BIND_BY_WWNN:
				if (rport->node_name == ids->node_name)
					match = 1;
				break;
			case FC_TGTID_BIND_BY_ID:
				if (rport->port_id == ids->port_id)
					match = 1;
				break;
			}

			if (match) {

				memcpy(&rport->node_name, &ids->node_name,
					sizeof(rport->node_name));
				memcpy(&rport->port_name, &ids->port_name,
					sizeof(rport->port_name));
				rport->port_id = ids->port_id;

				rport->port_state = FC_PORTSTATE_ONLINE;
				rport->roles = ids->roles;

				spin_unlock_irqrestore(shost->host_lock, flags);

				if (fci->f->dd_fcrport_size)
					memset(rport->dd_data, 0,
						fci->f->dd_fcrport_size);

				/*
				 * If we were not a target, cancel the
				 * io terminate and rport timers, and
				 * we're done.
				 *
				 * If we were a target, but our new role
				 * doesn't indicate a target, leave the
				 * timers running expecting the role to
				 * change as the target fully logs in. If
				 * it doesn't, the target will be torn down.
				 *
				 * If we were a target, and our role shows
				 * we're still a target, cancel the timers
				 * and kick off a scan.
				 */

				/* was a target, not in roles */
				if ((rport->scsi_target_id != -1) &&
				    (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
					return rport;

				/*
				 * Stop the fail io and dev_loss timers.
				 * If they flush, the port_state will
				 * be checked and will NOOP the function.
				 */
				if (!cancel_delayed_work(&rport->fail_io_work))
					fc_flush_devloss(shost);
				if (!cancel_delayed_work(&rport->dev_loss_work))
					fc_flush_devloss(shost);

				spin_lock_irqsave(shost->host_lock, flags);

				rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
						  FC_RPORT_DEVLOSS_PENDING |
						  FC_RPORT_DEVLOSS_CALLBK_DONE);

				spin_unlock_irqrestore(shost->host_lock, flags);

				/* if target, initiate a scan */
				if (rport->scsi_target_id != -1) {
					scsi_target_unblock(&rport->dev,
							    SDEV_RUNNING);
					spin_lock_irqsave(shost->host_lock,
							  flags);
					rport->flags |= FC_RPORT_SCAN_PENDING;
					scsi_queue_work(shost,
							&rport->scan_work);
					spin_unlock_irqrestore(shost->host_lock,
							       flags);
				}

				fc_bsg_goose_queue(rport);

				return rport;
			}
		}
	}

	/*
	 * Search the bindings array
	 * Note: if never a FCP target, you won't be on this list
	 */
	if (fc_host->tgtid_bind_type != FC_TGTID_BIND_NONE) {

		/* search for a matching consistent binding */
		list_for_each_entry(rport, &fc_host->rport_bindings,
					peers) {
			if (rport->channel != channel)
				continue;

			switch (fc_host->tgtid_bind_type) {
			case FC_TGTID_BIND_BY_WWPN:
				if (rport->port_name == ids->port_name)
					match = 1;
				break;
			case FC_TGTID_BIND_BY_WWNN:
				if (rport->node_name == ids->node_name)
					match = 1;
				break;
			case FC_TGTID_BIND_BY_ID:
				if (rport->port_id == ids->port_id)
					match = 1;
				break;
			case FC_TGTID_BIND_NONE: /* to keep compiler happy */
				break;
			}

			if (match) {
				list_move_tail(&rport->peers, &fc_host->rports);
				break;
			}
		}

		if (match) {
			memcpy(&rport->node_name, &ids->node_name,
				sizeof(rport->node_name));
			memcpy(&rport->port_name, &ids->port_name,
				sizeof(rport->port_name));
			rport->port_id = ids->port_id;
			rport->port_state = FC_PORTSTATE_ONLINE;
			rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;

			if (fci->f->dd_fcrport_size)
				memset(rport->dd_data, 0,
						fci->f->dd_fcrport_size);
			spin_unlock_irqrestore(shost->host_lock, flags);

			fc_remote_port_rolechg(rport, ids->roles);
			return rport;
		}
	}

	spin_unlock_irqrestore(shost->host_lock, flags);

	/* No consistent binding found - create new remote port entry */
	rport = fc_remote_port_create(shost, channel, ids);

	return rport;
}
EXPORT_SYMBOL(fc_remote_port_add);
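
/*
 * Illustrative sketch (not part of the original source): how an LLDD would
 * typically report a newly discovered FCP target to the transport. The
 * shost, wwnn, wwpn and port_id variables below are hypothetical driver
 * state, not symbols from this file.
 *
 *	struct fc_rport_identifiers ids;
 *	struct fc_rport *rport;
 *
 *	ids.node_name = wwnn;
 *	ids.port_name = wwpn;
 *	ids.port_id = port_id;
 *	ids.roles = FC_PORT_ROLE_FCP_TARGET;
 *
 *	rport = fc_remote_port_add(shost, 0, &ids);
 *	if (!rport)
 *		return -ENOMEM;
 *	// stash the rport so it can be handed to fc_remote_port_delete()
 *	// when connectivity to the port is later lost.
 */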

/**
 * fc_remote_port_delete - notifies the fc transport that a remote port is no longer in existence.
 * @rport:	The remote port that no longer exists
 *
 * The LLDD calls this routine to notify the transport that a remote
 * port is no longer part of the topology. Note: Although a port
 * may no longer be part of the topology, it may persist in the remote
 * ports displayed by the fc_host. We do this under 2 conditions:
 *
 * 1) If the port was a scsi target, we delay its deletion by "blocking" it.
 *    This allows the port to temporarily disappear, then reappear without
 *    disrupting the SCSI device tree attached to it. During the "blocked"
 *    period the port will still exist.
 *
 * 2) If the port was a scsi target and disappears for longer than we
 *    expect, we'll delete the port and tear down the SCSI device tree
 *    attached to it. However, we want to semi-persist the target id assigned
 *    to that port in case it eventually reappears. The port structure will
 *    remain (although with minimal information) so that the target id
 *    bindings also remain.
 *
 * If the remote port is not an FCP Target, it will be fully torn down
 * and deallocated, including the fc_remote_port class device.
 *
 * If the remote port is an FCP Target, the port will be placed in a
 * temporary blocked state. From the LLDD's perspective, the rport no
 * longer exists. From the SCSI midlayer's perspective, the SCSI target
 * exists, but all sdevs on it are blocked from further I/O. The following
 * is then expected.
 *
 * If the remote port does not return (signaled by a LLDD call to
 * fc_remote_port_add()) within the dev_loss_tmo timeout, then the
 * scsi target is removed - killing all outstanding i/o and removing the
 * scsi devices attached to it. The port structure will be marked Not
 * Present and be partially cleared, leaving only enough information to
 * recognize the remote port relative to the scsi target id binding if
 * it later appears. The port will remain as long as there is a valid
 * binding (e.g. until the user changes the binding type or unloads the
 * scsi host with the binding).
 *
 * If the remote port returns within the dev_loss_tmo value (and matches
 * according to the target id binding type), the port structure will be
 * reused. If it is no longer a SCSI target, the target will be torn
 * down. If it continues to be a SCSI target, then the target will be
 * unblocked (allowing i/o to be resumed), and a scan will be activated
 * to ensure that all luns are detected.
 *
 * Called from normal process context only - cannot be called from interrupt.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_remote_port_delete(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	unsigned long timeout = rport->dev_loss_tmo;
	unsigned long flags;

	/*
	 * No need to flush the fc_host work_q's, as all adds are synchronous.
	 *
	 * We do need to reclaim the rport scan work element, so eventually
	 * (in fc_rport_final_delete()) we'll flush the scsi host work_q if
	 * there's still a scan pending.
	 */

	spin_lock_irqsave(shost->host_lock, flags);

	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
	    (rport->port_state != FC_PORTSTATE_MARGINAL)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	/*
	 * In the past, if this was not an FCP Target, we would
	 * unconditionally just jump to deleting the rport.
	 * However, rports can be used as node containers by the LLDD,
	 * and it's not appropriate to just terminate the rport at the
	 * first sign of a loss in connectivity. The LLDD may want to
	 * send ELS traffic to re-validate the login. If the rport is
	 * immediately deleted, it makes it inappropriate for a node
	 * container.
	 * So... we now unconditionally wait dev_loss_tmo before
	 * destroying an rport.
	 */
	rport->port_state = FC_PORTSTATE_BLOCKED;
	rport->flags |= FC_RPORT_DEVLOSS_PENDING;

	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_target_block(&rport->dev);

	/* see if we need to kill io faster than waiting for device loss */
	if ((rport->fast_io_fail_tmo != -1) &&
	    (rport->fast_io_fail_tmo < timeout))
		fc_queue_devloss_work(shost, &rport->fail_io_work,
					rport->fast_io_fail_tmo * HZ);

	/* cap the length the devices can be blocked until they are deleted */
	fc_queue_devloss_work(shost, &rport->dev_loss_work, timeout * HZ);
}
EXPORT_SYMBOL(fc_remote_port_delete);
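
/*
 * Illustrative sketch (not part of the original source): on loss of
 * connectivity an LLDD simply hands the rport back to the transport and
 * lets dev_loss_tmo/fast_io_fail_tmo run; if the port returns in time it
 * calls fc_remote_port_add() again with the same identifiers and the
 * blocked rport (and its scsi target id) is reused. shost, rport and ids
 * below are hypothetical driver state.
 *
 *	// link or login to the port was lost
 *	fc_remote_port_delete(rport);
 *
 *	// ... later, the port logs back in before dev_loss_tmo expires ...
 *	rport = fc_remote_port_add(shost, 0, &ids);	// existing rport reused
 */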

/**
 * fc_remote_port_rolechg - notifies the fc transport that the roles on a remote may have changed.
 * @rport:	The remote port that changed.
 * @roles:	New roles for this port.
 *
 * Description: The LLDD calls this routine to notify the transport that the
 * roles on a remote port may have changed. The largest effect of this is
 * if a port now becomes an FCP Target, it must be allocated a
 * scsi target id. If the port is no longer an FCP target, any
 * scsi target id value assigned to it will persist in case the
 * role changes back to include FCP Target. No changes in the scsi
 * midlayer will be invoked if the role changes (in the expectation
 * that the role will be resumed; if it isn't, normal error processing
 * will take place).
 *
 * Should not be called from interrupt context.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
void
fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	unsigned long flags;
	int create = 0;

	spin_lock_irqsave(shost->host_lock, flags);
	if (roles & FC_PORT_ROLE_FCP_TARGET) {
		if (rport->scsi_target_id == -1) {
			rport->scsi_target_id = fc_host->next_target_id++;
			create = 1;
		} else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
			create = 1;
	}

	rport->roles = roles;

	spin_unlock_irqrestore(shost->host_lock, flags);

	if (create) {
		/*
		 * There may have been a delete timer running on the
		 * port. Ensure that it is cancelled as we now know
		 * the port is an FCP Target.
		 * Note: we know the rport exists and is in an online
		 * state as the LLDD would not have had an rport
		 * reference to pass us.
		 *
		 * Take no action on the del_timer failure as the state
		 * machine state change will validate the
		 * transaction.
		 */
		if (!cancel_delayed_work(&rport->fail_io_work))
			fc_flush_devloss(shost);
		if (!cancel_delayed_work(&rport->dev_loss_work))
			fc_flush_devloss(shost);

		spin_lock_irqsave(shost->host_lock, flags);
		rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
				  FC_RPORT_DEVLOSS_PENDING |
				  FC_RPORT_DEVLOSS_CALLBK_DONE);
		spin_unlock_irqrestore(shost->host_lock, flags);

		/* ensure any stgt delete functions are done */
		fc_flush_work(shost);

		scsi_target_unblock(&rport->dev, SDEV_RUNNING);

		/* initiate a scan of the target */
		spin_lock_irqsave(shost->host_lock, flags);
		rport->flags |= FC_RPORT_SCAN_PENDING;
		scsi_queue_work(shost, &rport->scan_work);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
}
EXPORT_SYMBOL(fc_remote_port_rolechg);
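
/*
 * Illustrative sketch (not part of the original source): an LLDD that first
 * registers a port with an unknown role and later, once PRLI completes,
 * learns it is an FCP target would do something like the following. shost,
 * ids and rport are hypothetical driver state.
 *
 *	ids.roles = FC_PORT_ROLE_UNKNOWN;
 *	rport = fc_remote_port_add(shost, 0, &ids);
 *	...
 *	// PRLI accept shows the port is an FCP target
 *	fc_remote_port_rolechg(rport, FC_PORT_ROLE_FCP_TARGET);
 *	// the transport assigns a scsi_target_id and kicks off a scan
 */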

/**
 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port.
 * @work:	rport target that failed to reappear in the allotted time.
 *
 * Description: An attempt to delete a remote port blocks, and if it fails
 *              to return in the allotted time this gets called.
 */
static void
fc_timeout_deleted_rport(struct work_struct *work)
{
	struct fc_rport *rport =
		container_of(work, struct fc_rport, dev_loss_work.work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_internal *i = to_fc_internal(shost->transportt);
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	unsigned long flags;
	int do_callback = 0;

	spin_lock_irqsave(shost->host_lock, flags);

	rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;

	/*
	 * If the port is ONLINE, then it came back. If it was a SCSI
	 * target, validate it still is. If not, tear down the
	 * scsi_target on it.
	 */
	if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
	     (rport->port_state == FC_PORTSTATE_MARGINAL)) &&
	    (rport->scsi_target_id != -1) &&
	    !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
		dev_printk(KERN_ERR, &rport->dev,
			"blocked FC remote port time out: no longer"
			" a FCP target, removing starget\n");
		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
		fc_queue_work(shost, &rport->stgt_delete_work);
		return;
	}

	/* NOOP state - we're flushing workq's */
	if (rport->port_state != FC_PORTSTATE_BLOCKED) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		dev_printk(KERN_ERR, &rport->dev,
			"blocked FC remote port time out: leaving"
			" rport%s alone\n",
			(rport->scsi_target_id != -1) ? " and starget" : "");
		return;
	}

	if ((fc_host->tgtid_bind_type == FC_TGTID_BIND_NONE) ||
	    (rport->scsi_target_id == -1)) {
		list_del(&rport->peers);
		rport->port_state = FC_PORTSTATE_DELETED;
		dev_printk(KERN_ERR, &rport->dev,
			"blocked FC remote port time out: removing"
			" rport%s\n",
			(rport->scsi_target_id != -1) ? " and starget" : "");
		fc_queue_work(shost, &rport->rport_delete_work);
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	dev_printk(KERN_ERR, &rport->dev,
		"blocked FC remote port time out: removing target and "
		"saving binding\n");

	list_move_tail(&rport->peers, &fc_host->rport_bindings);

	/*
	 * Note: We do not remove or clear the hostdata area. This allows
	 *   host-specific target data to persist along with the
	 *   scsi_target_id. It's up to the host to manage its hostdata area.
	 */

	/*
	 * Reinitialize port attributes that may change if the port comes back.
	 */
	rport->maxframe_size = -1;
	rport->supported_classes = FC_COS_UNSPECIFIED;
	rport->roles = FC_PORT_ROLE_UNKNOWN;
	rport->port_state = FC_PORTSTATE_NOTPRESENT;
	rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;

	/*
	 * Pre-emptively kill I/O rather than waiting for the work queue
	 * item to teardown the starget. (FCoE libFC folks prefer this
	 * and to have the rport_port_id still set when it's done).
	 */
	spin_unlock_irqrestore(shost->host_lock, flags);
	fc_terminate_rport_io(rport);

	spin_lock_irqsave(shost->host_lock, flags);

	if (rport->port_state == FC_PORTSTATE_NOTPRESENT) {	/* still missing */

		/* remove the identifiers that aren't used in the consistent binding */
		switch (fc_host->tgtid_bind_type) {
		case FC_TGTID_BIND_BY_WWPN:
			rport->node_name = -1;
			rport->port_id = -1;
			break;
		case FC_TGTID_BIND_BY_WWNN:
			rport->port_name = -1;
			rport->port_id = -1;
			break;
		case FC_TGTID_BIND_BY_ID:
			rport->node_name = -1;
			rport->port_name = -1;
			break;
		case FC_TGTID_BIND_NONE:	/* to keep compiler happy */
			break;
		}

		/*
		 * As this only occurs if the remote port (scsi target)
		 * went away and didn't come back - we'll remove
		 * all attached scsi devices.
		 */
		rport->flags |= FC_RPORT_DEVLOSS_CALLBK_DONE;
		fc_queue_work(shost, &rport->stgt_delete_work);

		do_callback = 1;
	}

	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Notify the driver that the rport is now dead. The LLDD will
	 * also guarantee that any communication to the rport is terminated
	 *
	 * Note: we set the CALLBK_DONE flag above to correspond
	 */
	if (do_callback && i->f->dev_loss_tmo_callbk)
		i->f->dev_loss_tmo_callbk(rport);
}

/**
 * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a disconnected SCSI target.
 * @work:	rport to terminate io on.
 *
 * Notes: Only requests the failure of the io, not that all are flushed
 *    prior to returning.
 */
static void
fc_timeout_fail_rport_io(struct work_struct *work)
{
	struct fc_rport *rport =
		container_of(work, struct fc_rport, fail_io_work.work);

	if (rport->port_state != FC_PORTSTATE_BLOCKED)
		return;

	rport->flags |= FC_RPORT_FAST_FAIL_TIMEDOUT;
	fc_terminate_rport_io(rport);
}

/**
 * fc_scsi_scan_rport - called to perform a scsi scan on a remote port.
 * @work:	remote port to be scanned.
 */
static void
fc_scsi_scan_rport(struct work_struct *work)
{
	struct fc_rport *rport =
		container_of(work, struct fc_rport, scan_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct fc_internal *i = to_fc_internal(shost->transportt);
	unsigned long flags;

	if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
	     (rport->port_state == FC_PORTSTATE_MARGINAL)) &&
	    (rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
	    !(i->f->disable_target_scan)) {
		scsi_scan_target(&rport->dev, rport->channel,
				 rport->scsi_target_id, SCAN_WILD_CARD,
				 SCSI_SCAN_RESCAN);
	}

	spin_lock_irqsave(shost->host_lock, flags);
	rport->flags &= ~FC_RPORT_SCAN_PENDING;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * fc_block_rport() - Block SCSI eh thread for blocked fc_rport.
 * @rport:	Remote port that scsi_eh is trying to recover.
 *
 * This routine can be called from a FC LLD scsi_eh callback. It
 * blocks the scsi_eh thread until the fc_rport leaves the
 * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
 * necessary to avoid the scsi_eh failing recovery actions for blocked
 * rports which would lead to offlined SCSI devices.
 *
 * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
 *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
 *	    passed back to scsi_eh.
 */
int fc_block_rport(struct fc_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	while (rport->port_state == FC_PORTSTATE_BLOCKED &&
	       !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		msleep(1000);
		spin_lock_irqsave(shost->host_lock, flags);
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
		return FAST_IO_FAIL;

	return 0;
}
EXPORT_SYMBOL(fc_block_rport);

/**
 * fc_block_scsi_eh - Block SCSI eh thread for blocked fc_rport
 * @cmnd:	SCSI command that scsi_eh is trying to recover
 *
 * This routine can be called from a FC LLD scsi_eh callback. It
 * blocks the scsi_eh thread until the fc_rport leaves the
 * FC_PORTSTATE_BLOCKED, or the fast_io_fail_tmo fires. This is
 * necessary to avoid the scsi_eh failing recovery actions for blocked
 * rports which would lead to offlined SCSI devices.
 *
 * Returns: 0 if the fc_rport left the state FC_PORTSTATE_BLOCKED.
 *	    FAST_IO_FAIL if the fast_io_fail_tmo fired, this should be
 *	    passed back to scsi_eh.
 */
int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	if (WARN_ON_ONCE(!rport))
		return FAST_IO_FAIL;

	return fc_block_rport(rport);
}
EXPORT_SYMBOL(fc_block_scsi_eh);
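
/*
 * Illustrative sketch (not part of the original source): a typical LLDD
 * eh_device_reset_handler first waits out a blocked rport via
 * fc_block_scsi_eh() and propagates FAST_IO_FAIL back to scsi_eh.
 * my_lld_reset_lun() is a hypothetical driver helper.
 *
 *	static int my_lld_eh_device_reset(struct scsi_cmnd *cmnd)
 *	{
 *		int ret;
 *
 *		ret = fc_block_scsi_eh(cmnd);
 *		if (ret)
 *			return ret;	// FAST_IO_FAIL
 *
 *		return my_lld_reset_lun(cmnd);
 *	}
 */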

/*
 * fc_eh_should_retry_cmd - Checks if the cmd should be retried or not
 * @scmd:	The SCSI command to be checked
 *
 * This checks the rport state to decide if a cmd is
 * retryable.
 *
 * Returns: true if the rport is not in a marginal state.
 */
bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));

	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
	    (scsi_cmd_to_rq(scmd)->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
		set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(fc_eh_should_retry_cmd);

/**
 * fc_vport_setup - allocates and creates a FC virtual port.
 * @shost:	scsi host the virtual port is connected to.
 * @channel:	Channel on shost port connected to.
 * @pdev:	parent device for vport
 * @ids:	The world wide names, FC4 port roles, etc. for
 *		the virtual port.
 * @ret_vport:	The pointer to the created vport.
 *
 * Allocates and creates the vport structure, calls the parent host
 * to instantiate the vport, and completes with class and sysfs creation.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
static int
fc_vport_setup(struct Scsi_Host *shost, int channel, struct device *pdev,
	struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
{
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	struct fc_internal *fci = to_fc_internal(shost->transportt);
	struct fc_vport *vport;
	struct device *dev;
	unsigned long flags;
	size_t size;
	int error;

	*ret_vport = NULL;

	if (!fci->f->vport_create)
		return -ENOENT;

	size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
	vport = kzalloc(size, GFP_KERNEL);
	if (unlikely(!vport)) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return -ENOMEM;
	}

	vport->vport_state = FC_VPORT_UNKNOWN;
	vport->vport_last_state = FC_VPORT_UNKNOWN;
	vport->node_name = ids->node_name;
	vport->port_name = ids->port_name;
	vport->roles = ids->roles;
	vport->vport_type = ids->vport_type;
	if (fci->f->dd_fcvport_size)
		vport->dd_data = &vport[1];
	vport->shost = shost;
	vport->channel = channel;
	vport->flags = FC_VPORT_CREATING;
	INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);

	spin_lock_irqsave(shost->host_lock, flags);

	if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		kfree(vport);
		return -ENOSPC;
	}
	fc_host->npiv_vports_inuse++;
	vport->number = fc_host->next_vport_number++;
	list_add_tail(&vport->peers, &fc_host->vports);
	scsi_host_get(shost);			/* for fc_host->vport list */

	spin_unlock_irqrestore(shost->host_lock, flags);

	dev = &vport->dev;
	device_initialize(dev);			/* takes self reference */
	dev->parent = get_device(pdev);		/* takes parent reference */
	dev->release = fc_vport_dev_release;
	dev_set_name(dev, "vport-%d:%d-%d",
		     shost->host_no, channel, vport->number);
	transport_setup_device(dev);

	error = device_add(dev);
	if (error) {
		printk(KERN_ERR "FC Virtual Port device_add failed\n");
		goto delete_vport;
	}
	transport_add_device(dev);
	transport_configure_device(dev);

	error = fci->f->vport_create(vport, ids->disable);
	if (error) {
		printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
		goto delete_vport_all;
	}

	/*
	 * if the parent isn't the physical adapter's Scsi_Host, ensure
	 * the Scsi_Host at least contains a symlink to the vport.
	 */
	if (pdev != &shost->shost_gendev) {
		error = sysfs_create_link(&shost->shost_gendev.kobj,
				 &dev->kobj, dev_name(dev));
		if (error)
			printk(KERN_ERR
				"%s: Cannot create vport symlinks for "
				"%s, err=%d\n",
				__func__, dev_name(dev), error);
	}
	spin_lock_irqsave(shost->host_lock, flags);
	vport->flags &= ~FC_VPORT_CREATING;
	spin_unlock_irqrestore(shost->host_lock, flags);

	dev_printk(KERN_NOTICE, pdev,
			"%s created via shost%d channel %d\n", dev_name(dev),
			shost->host_no, channel);

	*ret_vport = vport;

	return 0;

delete_vport_all:
	transport_remove_device(dev);
	device_del(dev);
delete_vport:
	transport_destroy_device(dev);
	spin_lock_irqsave(shost->host_lock, flags);
	list_del(&vport->peers);
	scsi_host_put(shost);			/* for fc_host->vport list */
	fc_host->npiv_vports_inuse--;
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev->parent);
	kfree(vport);

	return error;
}

/**
 * fc_vport_create - Admin App or LLDD requests creation of a vport
 * @shost:	scsi host the virtual port is connected to.
 * @channel:	channel on shost port connected to.
 * @ids:	The world wide names, FC4 port roles, etc. for
 *		the virtual port.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
struct fc_vport *
fc_vport_create(struct Scsi_Host *shost, int channel,
	struct fc_vport_identifiers *ids)
{
	int stat;
	struct fc_vport *vport;

	stat = fc_vport_setup(shost, channel, &shost->shost_gendev,
		ids, &vport);
	return stat ? NULL : vport;
}
EXPORT_SYMBOL(fc_vport_create);
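
/*
 * Illustrative sketch (not part of the original source): creating an NPIV
 * vport from an LLDD or management path. The shost, new_wwpn and new_wwnn
 * values are hypothetical, and the host template must implement
 * vport_create() for this to succeed.
 *
 *	struct fc_vport_identifiers vid = {
 *		.vport_type = FC_PORTTYPE_NPIV,
 *		.port_name = new_wwpn,
 *		.node_name = new_wwnn,
 *		.roles = FC_PORT_ROLE_FCP_INITIATOR,
 *		.disable = false,
 *	};
 *	struct fc_vport *vport;
 *
 *	vport = fc_vport_create(shost, 0, &vid);
 *	if (!vport)
 *		return -ENOMEM;
 */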

/**
 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
 * @vport:	fc_vport to be terminated
 *
 * Calls the LLDD vport_delete() function, then deallocates and removes
 * the vport from the shost and object tree.
 *
 * Notes:
 *	This routine assumes no locks are held on entry.
 */
int
fc_vport_terminate(struct fc_vport *vport)
{
	struct Scsi_Host *shost = vport_to_shost(vport);
	struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
	struct fc_internal *i = to_fc_internal(shost->transportt);
	struct device *dev = &vport->dev;
	unsigned long flags;
	int stat;

	if (i->f->vport_delete)
		stat = i->f->vport_delete(vport);
	else
		stat = -ENOENT;

	spin_lock_irqsave(shost->host_lock, flags);
	vport->flags &= ~FC_VPORT_DELETING;
	if (!stat) {
		vport->flags |= FC_VPORT_DELETED;
		list_del(&vport->peers);
		fc_host->npiv_vports_inuse--;
		scsi_host_put(shost);		/* for fc_host->vport list */
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (stat)
		return stat;

	if (dev->parent != &shost->shost_gendev)
		sysfs_remove_link(&shost->shost_gendev.kobj, dev_name(dev));
	transport_remove_device(dev);
	device_del(dev);
	transport_destroy_device(dev);

	/*
	 * Removing our self-reference should mean our
	 * release function gets called, which will drop the remaining
	 * parent reference and free the data structure.
	 */
	put_device(dev);			/* for self-reference */

	return 0; /* SUCCESS */
}
EXPORT_SYMBOL(fc_vport_terminate);

/**
 * fc_vport_sched_delete - workq-based delete request for a vport
 * @work:	vport to be deleted.
 */
static void
fc_vport_sched_delete(struct work_struct *work)
{
	struct fc_vport *vport =
		container_of(work, struct fc_vport, vport_delete_work);
	int stat;

	stat = fc_vport_terminate(vport);
	if (stat)
		dev_printk(KERN_ERR, vport->dev.parent,
			"%s: %s could not be deleted created via "
			"shost%d channel %d - error %d\n", __func__,
			dev_name(&vport->dev), vport->shost->host_no,
			vport->channel, stat);
}

/*
 * BSG support
 */

/**
 * fc_bsg_job_timeout - handler for when a bsg request times out
 * @req:	request that timed out
 */
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
	struct bsg_job *job = blk_mq_rq_to_pdu(req);
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct fc_rport *rport = fc_bsg_to_rport(job);
	struct fc_internal *i = to_fc_internal(shost->transportt);
	int err = 0, inflight = 0;

	if (rport && rport->port_state == FC_PORTSTATE_BLOCKED)
		return BLK_EH_RESET_TIMER;

	inflight = bsg_job_get(job);

	if (inflight && i->f->bsg_timeout) {
		/* call LLDD to abort the i/o as it has timed out */
		err = i->f->bsg_timeout(job);
		if (err == -EAGAIN) {
			bsg_job_put(job);
			return BLK_EH_RESET_TIMER;
		} else if (err)
			printk(KERN_ERR "ERROR: FC BSG request timeout - LLD "
				"abort failed with status %d\n", err);
	}

	/* the blk_end_sync_io() doesn't check the error */
	if (inflight)
		blk_mq_end_request(req, BLK_STS_IOERR);
	return BLK_EH_DONE;
}

/**
 * fc_bsg_host_dispatch - process fc host bsg requests and dispatch to LLDD
 * @shost:	scsi host rport attached to
 * @job:	bsg job to be processed
 */
static int fc_bsg_host_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
	struct fc_internal *i = to_fc_internal(shost->transportt);
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
	int ret;

	/* check if we really have all the request data needed */
	if (job->request_len < cmdlen) {
		ret = -ENOMSG;
		goto fail_host_msg;
	}

	/* Validate the host command */
	switch (bsg_request->msgcode) {
	case FC_BSG_HST_ADD_RPORT:
		cmdlen += sizeof(struct fc_bsg_host_add_rport);
		break;

	case FC_BSG_HST_DEL_RPORT:
		cmdlen += sizeof(struct fc_bsg_host_del_rport);
		break;

	case FC_BSG_HST_ELS_NOLOGIN:
		cmdlen += sizeof(struct fc_bsg_host_els);
		/* there better be xmt and rcv payloads */
		if ((!job->request_payload.payload_len) ||
		    (!job->reply_payload.payload_len)) {
			ret = -EINVAL;
			goto fail_host_msg;
		}
		break;

	case FC_BSG_HST_CT:
		cmdlen += sizeof(struct fc_bsg_host_ct);
		/* there better be xmt and rcv payloads */
		if ((!job->request_payload.payload_len) ||
		    (!job->reply_payload.payload_len)) {
			ret = -EINVAL;
			goto fail_host_msg;
		}
		break;

	case FC_BSG_HST_VENDOR:
		cmdlen += sizeof(struct fc_bsg_host_vendor);
		if ((shost->hostt->vendor_id == 0L) ||
		    (bsg_request->rqst_data.h_vendor.vendor_id !=
			shost->hostt->vendor_id)) {
			ret = -ESRCH;
			goto fail_host_msg;
		}
		break;

	default:
		ret = -EBADR;
		goto fail_host_msg;
	}

	ret = i->f->bsg_request(job);
	if (!ret)
		return 0;

fail_host_msg:
	/* return the errno failure code as the only status */
	BUG_ON(job->reply_len < sizeof(uint32_t));
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = ret;
	job->reply_len = sizeof(uint32_t);
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;
}

/*
 * fc_bsg_goose_queue - restart rport queue in case it was stopped
 * @rport:	rport to be restarted
 */
static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
	struct request_queue *q = rport->rqst_q;

	if (q)
		blk_mq_run_hw_queues(q, true);
}

/**
 * fc_bsg_rport_dispatch - process rport bsg requests and dispatch to LLDD
 * @shost:	scsi host rport attached to
 * @job:	bsg job to be processed
 */
static int fc_bsg_rport_dispatch(struct Scsi_Host *shost, struct bsg_job *job)
{
	struct fc_internal *i = to_fc_internal(shost->transportt);
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	int cmdlen = sizeof(uint32_t);	/* start with length of msgcode */
	int ret;

	/* check if we really have all the request data needed */
	if (job->request_len < cmdlen) {
		ret = -ENOMSG;
		goto fail_rport_msg;
	}

	/* Validate the rport command */
	switch (bsg_request->msgcode) {
	case FC_BSG_RPT_ELS:
		cmdlen += sizeof(struct fc_bsg_rport_els);
		goto check_bidi;

	case FC_BSG_RPT_CT:
		cmdlen += sizeof(struct fc_bsg_rport_ct);
check_bidi:
		/* there better be xmt and rcv payloads */
		if ((!job->request_payload.payload_len) ||
		    (!job->reply_payload.payload_len)) {
			ret = -EINVAL;
			goto fail_rport_msg;
		}
		break;

	default:
		ret = -EBADR;
		goto fail_rport_msg;
	}

	ret = i->f->bsg_request(job);
	if (!ret)
		return 0;

fail_rport_msg:
	/* return the errno failure code as the only status */
	BUG_ON(job->reply_len < sizeof(uint32_t));
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = ret;
	job->reply_len = sizeof(uint32_t);
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;
}

static int fc_bsg_dispatch(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);

	if (scsi_is_fc_rport(job->dev))
		return fc_bsg_rport_dispatch(shost, job);
	else
		return fc_bsg_host_dispatch(shost, job);
}

static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
{
	if (rport->port_state == FC_PORTSTATE_BLOCKED &&
	    !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
		return BLK_STS_RESOURCE;

	if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
	    (rport->port_state != FC_PORTSTATE_MARGINAL))
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

static int fc_bsg_dispatch_prep(struct bsg_job *job)
{
	struct fc_rport *rport = fc_bsg_to_rport(job);
	blk_status_t ret;

	ret = fc_bsg_rport_prep(rport);
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
		return -EAGAIN;
	default:
		return -EIO;
	}

	return fc_bsg_dispatch(job);
}

/**
 * fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
 * @shost:	shost for fc_host
 * @fc_host:	fc_host adding the structures to
 */
static int
fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
{
	struct device *dev = &shost->shost_gendev;
	struct fc_internal *i = to_fc_internal(shost->transportt);
	struct request_queue *q;
	char bsg_name[20];

	fc_host->rqst_q = NULL;

	if (!i->f->bsg_request)
		return -ENOTSUPP;

	snprintf(bsg_name, sizeof(bsg_name),
		 "fc_host%d", shost->host_no);

	q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout,
				i->f->dd_bsg_size);
	if (IS_ERR(q)) {
		dev_err(dev,
			"fc_host%d: bsg interface failed to initialize - setup queue\n",
			shost->host_no);
		return PTR_ERR(q);
	}
	__scsi_init_queue(shost, q);
	blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
	fc_host->rqst_q = q;
	return 0;
}

/**
 * fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
 * @shost:	shost that rport is attached to
 * @rport:	rport that the bsg hooks are being attached to
 */
static int
fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
{
	struct device *dev = &rport->dev;
	struct fc_internal *i = to_fc_internal(shost->transportt);
	struct request_queue *q;

	rport->rqst_q = NULL;

	if (!i->f->bsg_request)
		return -ENOTSUPP;

	q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
				fc_bsg_job_timeout, i->f->dd_bsg_size);
	if (IS_ERR(q)) {
		dev_err(dev, "failed to setup bsg queue\n");
		return PTR_ERR(q);
	}
	__scsi_init_queue(shost, q);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
	rport->rqst_q = q;
	return 0;
}

/**
 * fc_bsg_remove - Deletes the bsg hooks on fchosts/rports
 * @q:	the request_queue that is to be torn down.
 *
 * Notes:
 *	Before unregistering the queue, empty any requests that are blocked.
 */
static void
fc_bsg_remove(struct request_queue *q)
{
	bsg_remove_queue(q);
}

/* Original Author: Martin Hicks */
MODULE_AUTHOR("James Smart");
MODULE_DESCRIPTION("FC Transport Attributes");
MODULE_LICENSE("GPL");

module_init(fc_transport_init);
module_exit(fc_transport_exit);