connect.c

  1. // SPDX-License-Identifier: LGPL-2.1
  2. /*
  3. *
  4. * Copyright (C) International Business Machines Corp., 2002,2011
  5. * Author(s): Steve French (sfrench@us.ibm.com)
  6. *
  7. */
  8. #include <linux/fs.h>
  9. #include <linux/net.h>
  10. #include <linux/string.h>
  11. #include <linux/sched/mm.h>
  12. #include <linux/sched/signal.h>
  13. #include <linux/list.h>
  14. #include <linux/wait.h>
  15. #include <linux/slab.h>
  16. #include <linux/pagemap.h>
  17. #include <linux/ctype.h>
  18. #include <linux/utsname.h>
  19. #include <linux/mempool.h>
  20. #include <linux/delay.h>
  21. #include <linux/completion.h>
  22. #include <linux/kthread.h>
  23. #include <linux/pagevec.h>
  24. #include <linux/freezer.h>
  25. #include <linux/namei.h>
  26. #include <linux/uuid.h>
  27. #include <linux/uaccess.h>
  28. #include <asm/processor.h>
  29. #include <linux/inet.h>
  30. #include <linux/module.h>
  31. #include <keys/user-type.h>
  32. #include <net/ipv6.h>
  33. #include <linux/parser.h>
  34. #include <linux/bvec.h>
  35. #include "cifspdu.h"
  36. #include "cifsglob.h"
  37. #include "cifsproto.h"
  38. #include "cifs_unicode.h"
  39. #include "cifs_debug.h"
  40. #include "cifs_fs_sb.h"
  41. #include "ntlmssp.h"
  42. #include "nterr.h"
  43. #include "rfc1002pdu.h"
  44. #include "fscache.h"
  45. #include "smb2proto.h"
  46. #include "smbdirect.h"
  47. #include "dns_resolve.h"
  48. #ifdef CONFIG_CIFS_DFS_UPCALL
  49. #include "dfs_cache.h"
  50. #endif
  51. #include "fs_context.h"
  52. #include "cifs_swn.h"
  53. extern mempool_t *cifs_req_poolp;
  54. extern bool disable_legacy_dialects;
  55. /* FIXME: should these be tunable? */
  56. #define TLINK_ERROR_EXPIRE (1 * HZ)
  57. #define TLINK_IDLE_EXPIRE (600 * HZ)
  58. /* Drop the connection to not overload the server */
  59. #define MAX_STATUS_IO_TIMEOUT 5
  60. struct mount_ctx {
  61. struct cifs_sb_info *cifs_sb;
  62. struct smb3_fs_context *fs_ctx;
  63. unsigned int xid;
  64. struct TCP_Server_Info *server;
  65. struct cifs_ses *ses;
  66. struct cifs_tcon *tcon;
  67. #ifdef CONFIG_CIFS_DFS_UPCALL
  68. struct cifs_ses *root_ses;
  69. uuid_t mount_id;
  70. char *origin_fullpath, *leaf_fullpath;
  71. #endif
  72. };
  73. static int ip_connect(struct TCP_Server_Info *server);
  74. static int generic_ip_connect(struct TCP_Server_Info *server);
  75. static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
  76. static void cifs_prune_tlinks(struct work_struct *work);
  77. /*
  78. * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
  79. * get their ip addresses changed at some point.
  80. *
  81. * This should be called with server->srv_mutex held.
  82. */
  83. static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
  84. {
  85. int rc;
  86. int len;
  87. char *unc, *ipaddr = NULL;
  88. time64_t expiry, now;
  89. unsigned long ttl = SMB_DNS_RESOLVE_INTERVAL_DEFAULT;
  90. if (!server->hostname)
  91. return -EINVAL;
  92. /* if server hostname isn't populated, there's nothing to do here */
  93. if (server->hostname[0] == '\0')
  94. return 0;
  95. len = strlen(server->hostname) + 3;
  96. unc = kmalloc(len, GFP_KERNEL);
  97. if (!unc) {
  98. cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
  99. return -ENOMEM;
  100. }
  101. scnprintf(unc, len, "\\\\%s", server->hostname);
  102. rc = dns_resolve_server_name_to_ip(unc, &ipaddr, &expiry);
  103. kfree(unc);
  104. if (rc < 0) {
  105. cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
  106. __func__, server->hostname, rc);
  107. goto requeue_resolve;
  108. }
  109. spin_lock(&server->srv_lock);
  110. rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
  111. strlen(ipaddr));
  112. spin_unlock(&server->srv_lock);
  113. kfree(ipaddr);
  114. /* rc == 1 means success here */
  115. if (rc) {
  116. now = ktime_get_real_seconds();
  117. if (expiry && expiry > now)
  118. /*
  119. * To make sure we don't use the cached entry, retry 1s
  120. * after expiry.
  121. */
  122. ttl = max_t(unsigned long, expiry - now, SMB_DNS_RESOLVE_INTERVAL_MIN) + 1;
  123. }
  124. rc = !rc ? -1 : 0;
  125. requeue_resolve:
  126. cifs_dbg(FYI, "%s: next dns resolution scheduled for %lu seconds in the future\n",
  127. __func__, ttl);
  128. mod_delayed_work(cifsiod_wq, &server->resolve, (ttl * HZ));
  129. return rc;
  130. }
  131. static void smb2_query_server_interfaces(struct work_struct *work)
  132. {
  133. int rc;
  134. struct cifs_tcon *tcon = container_of(work,
  135. struct cifs_tcon,
  136. query_interfaces.work);
  137. /*
  138. * query server network interfaces, in case they change
  139. */
  140. rc = SMB3_request_interfaces(0, tcon, false);
  141. if (rc) {
  142. cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
  143. __func__, rc);
  144. }
  145. queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
  146. (SMB_INTERFACE_POLL_INTERVAL * HZ));
  147. }
  148. static void cifs_resolve_server(struct work_struct *work)
  149. {
  150. int rc;
  151. struct TCP_Server_Info *server = container_of(work,
  152. struct TCP_Server_Info, resolve.work);
  153. cifs_server_lock(server);
  154. /*
  155. * Resolve the hostname again to make sure that IP address is up-to-date.
  156. */
  157. rc = reconn_set_ipaddr_from_hostname(server);
  158. if (rc) {
  159. cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
  160. __func__, rc);
  161. }
  162. cifs_server_unlock(server);
  163. }
  164. /*
  165. * Update the tcpStatus for the server.
  166. * This is used to signal the cifsd thread to call cifs_reconnect
  167. * ONLY cifsd thread should call cifs_reconnect. For any other
  168. * thread, use this function
  169. *
  170. * @server: the tcp ses for which reconnect is needed
  171. * @all_channels: if this needs to be done for all channels
  172. */
  173. void
  174. cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
  175. bool all_channels)
  176. {
  177. struct TCP_Server_Info *pserver;
  178. struct cifs_ses *ses;
  179. int i;
  180. /* If server is a channel, select the primary channel */
  181. pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
  182. /* if we need to signal just this channel */
  183. if (!all_channels) {
  184. spin_lock(&server->srv_lock);
  185. if (server->tcpStatus != CifsExiting)
  186. server->tcpStatus = CifsNeedReconnect;
  187. spin_unlock(&server->srv_lock);
  188. return;
  189. }
  190. spin_lock(&cifs_tcp_ses_lock);
  191. list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
  192. spin_lock(&ses->chan_lock);
  193. for (i = 0; i < ses->chan_count; i++) {
  194. spin_lock(&ses->chans[i].server->srv_lock);
  195. ses->chans[i].server->tcpStatus = CifsNeedReconnect;
  196. spin_unlock(&ses->chans[i].server->srv_lock);
  197. }
  198. spin_unlock(&ses->chan_lock);
  199. }
  200. spin_unlock(&cifs_tcp_ses_lock);
  201. }
  202. /*
  203. * Mark all sessions and tcons for reconnect.
  204. * IMPORTANT: make sure that this gets called only from
  205. * cifsd thread. For any other thread, use
  206. * cifs_signal_cifsd_for_reconnect
  207. *
  208. * @server: the tcp ses for which reconnect is needed
  209. * @server needs to be previously set to CifsNeedReconnect.
  210. * @mark_smb_session: whether even sessions need to be marked
  211. */
  212. void
  213. cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
  214. bool mark_smb_session)
  215. {
  216. struct TCP_Server_Info *pserver;
  217. struct cifs_ses *ses, *nses;
  218. struct cifs_tcon *tcon;
  219. /*
  220. * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
  221. * are not used until reconnected.
  222. */
  223. cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
  224. /* If server is a channel, select the primary channel */
  225. pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
  226. spin_lock(&cifs_tcp_ses_lock);
  227. list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
  228. /* check if iface is still active */
  229. if (!cifs_chan_is_iface_active(ses, server))
  230. cifs_chan_update_iface(ses, server);
  231. spin_lock(&ses->chan_lock);
  232. if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
  233. spin_unlock(&ses->chan_lock);
  234. continue;
  235. }
  236. if (mark_smb_session)
  237. CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
  238. else
  239. cifs_chan_set_need_reconnect(ses, server);
  240. cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
  241. __func__, ses->chans_need_reconnect);
  242. /* If all channels need reconnect, then tcon needs reconnect */
  243. if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
  244. spin_unlock(&ses->chan_lock);
  245. continue;
  246. }
  247. spin_unlock(&ses->chan_lock);
  248. spin_lock(&ses->ses_lock);
  249. ses->ses_status = SES_NEED_RECON;
  250. spin_unlock(&ses->ses_lock);
  251. list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
  252. tcon->need_reconnect = true;
  253. spin_lock(&tcon->tc_lock);
  254. tcon->status = TID_NEED_RECON;
  255. spin_unlock(&tcon->tc_lock);
  256. }
  257. if (ses->tcon_ipc) {
  258. ses->tcon_ipc->need_reconnect = true;
  259. spin_lock(&ses->tcon_ipc->tc_lock);
  260. ses->tcon_ipc->status = TID_NEED_RECON;
  261. spin_unlock(&ses->tcon_ipc->tc_lock);
  262. }
  263. }
  264. spin_unlock(&cifs_tcp_ses_lock);
  265. }
  266. static void
  267. cifs_abort_connection(struct TCP_Server_Info *server)
  268. {
  269. struct mid_q_entry *mid, *nmid;
  270. struct list_head retry_list;
  271. server->maxBuf = 0;
  272. server->max_read = 0;
  273. /* do not want to be sending data on a socket we are freeing */
  274. cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
  275. cifs_server_lock(server);
  276. if (server->ssocket) {
  277. cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
  278. server->ssocket->flags);
  279. kernel_sock_shutdown(server->ssocket, SHUT_WR);
  280. cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
  281. server->ssocket->flags);
  282. sock_release(server->ssocket);
  283. server->ssocket = NULL;
  284. }
  285. server->sequence_number = 0;
  286. server->session_estab = false;
  287. kfree_sensitive(server->session_key.response);
  288. server->session_key.response = NULL;
  289. server->session_key.len = 0;
  290. server->lstrp = jiffies;
  291. /* mark submitted MIDs for retry and issue callback */
  292. INIT_LIST_HEAD(&retry_list);
  293. cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
  294. spin_lock(&server->mid_lock);
  295. list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
  296. kref_get(&mid->refcount);
  297. if (mid->mid_state == MID_REQUEST_SUBMITTED)
  298. mid->mid_state = MID_RETRY_NEEDED;
  299. list_move(&mid->qhead, &retry_list);
  300. mid->mid_flags |= MID_DELETED;
  301. }
  302. spin_unlock(&server->mid_lock);
  303. cifs_server_unlock(server);
  304. cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
  305. list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
  306. list_del_init(&mid->qhead);
  307. mid->callback(mid);
  308. release_mid(mid);
  309. }
  310. if (cifs_rdma_enabled(server)) {
  311. cifs_server_lock(server);
  312. smbd_destroy(server);
  313. cifs_server_unlock(server);
  314. }
  315. }
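/*
 * Record how many reconnect targets are available and mark the tcp session
 * as needing reconnect. Returns false if the session is already exiting,
 * in which case the demux thread will terminate on its next pass through
 * the loop.
 */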
  316. static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
  317. {
  318. spin_lock(&server->srv_lock);
  319. server->nr_targets = num_targets;
  320. if (server->tcpStatus == CifsExiting) {
  321. /* the demux thread will exit normally next time through the loop */
  322. spin_unlock(&server->srv_lock);
  323. wake_up(&server->response_q);
  324. return false;
  325. }
  326. cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
  327. trace_smb3_reconnect(server->CurrentMid, server->conn_id,
  328. server->hostname);
  329. server->tcpStatus = CifsNeedReconnect;
  330. spin_unlock(&server->srv_lock);
  331. return true;
  332. }
  333. /*
  334. * cifs tcp session reconnection
  335. *
  336. * mark tcp session as reconnecting so temporarily locked
  337. * mark all smb sessions as reconnecting for tcp session
  338. * reconnect tcp session
  339. * wake up waiters on reconnection? - (not needed currently)
  340. *
  341. * if mark_smb_session is passed as true, unconditionally mark
  342. * the smb session (and tcon) for reconnect as well. This value
  343. * doesn't really matter for non-multichannel scenario.
  344. *
  345. */
  346. static int __cifs_reconnect(struct TCP_Server_Info *server,
  347. bool mark_smb_session)
  348. {
  349. int rc = 0;
  350. if (!cifs_tcp_ses_needs_reconnect(server, 1))
  351. return 0;
  352. cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
  353. cifs_abort_connection(server);
  354. do {
  355. try_to_freeze();
  356. cifs_server_lock(server);
  357. if (!cifs_swn_set_server_dstaddr(server)) {
  358. /* resolve the hostname again to make sure that IP address is up-to-date */
  359. rc = reconn_set_ipaddr_from_hostname(server);
  360. cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
  361. }
  362. if (cifs_rdma_enabled(server))
  363. rc = smbd_reconnect(server);
  364. else
  365. rc = generic_ip_connect(server);
  366. if (rc) {
  367. cifs_server_unlock(server);
  368. cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
  369. msleep(3000);
  370. } else {
  371. atomic_inc(&tcpSesReconnectCount);
  372. set_credits(server, 1);
  373. spin_lock(&server->srv_lock);
  374. if (server->tcpStatus != CifsExiting)
  375. server->tcpStatus = CifsNeedNegotiate;
  376. spin_unlock(&server->srv_lock);
  377. cifs_swn_reset_server_dstaddr(server);
  378. cifs_server_unlock(server);
  379. mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
  380. }
  381. } while (server->tcpStatus == CifsNeedReconnect);
  382. spin_lock(&server->srv_lock);
  383. if (server->tcpStatus == CifsNeedNegotiate)
  384. mod_delayed_work(cifsiod_wq, &server->echo, 0);
  385. spin_unlock(&server->srv_lock);
  386. wake_up(&server->response_q);
  387. return rc;
  388. }
  389. #ifdef CONFIG_CIFS_DFS_UPCALL
  390. static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
  391. {
  392. int rc;
  393. char *hostname;
  394. if (!cifs_swn_set_server_dstaddr(server)) {
  395. if (server->hostname != target) {
  396. hostname = extract_hostname(target);
  397. if (!IS_ERR(hostname)) {
  398. spin_lock(&server->srv_lock);
  399. kfree(server->hostname);
  400. server->hostname = hostname;
  401. spin_unlock(&server->srv_lock);
  402. } else {
  403. cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
  404. __func__, PTR_ERR(hostname));
  405. cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
  406. server->hostname);
  407. }
  408. }
  409. /* resolve the hostname again to make sure that IP address is up-to-date. */
  410. rc = reconn_set_ipaddr_from_hostname(server);
  411. cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
  412. }
  413. /* Reconnect the socket */
  414. if (cifs_rdma_enabled(server))
  415. rc = smbd_reconnect(server);
  416. else
  417. rc = generic_ip_connect(server);
  418. return rc;
  419. }
  420. static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
  421. struct dfs_cache_tgt_iterator **target_hint)
  422. {
  423. int rc;
  424. struct dfs_cache_tgt_iterator *tit;
  425. *target_hint = NULL;
  426. /* If dfs target list is empty, then reconnect to last server */
  427. tit = dfs_cache_get_tgt_iterator(tl);
  428. if (!tit)
  429. return __reconnect_target_unlocked(server, server->hostname);
  430. /* Otherwise, try every dfs target in @tl */
  431. for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
  432. rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
  433. if (!rc) {
  434. *target_hint = tit;
  435. break;
  436. }
  437. }
  438. return rc;
  439. }
  440. static int reconnect_dfs_server(struct TCP_Server_Info *server)
  441. {
  442. int rc = 0;
  443. const char *refpath = server->current_fullpath + 1;
  444. struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
  445. struct dfs_cache_tgt_iterator *target_hint = NULL;
  446. int num_targets = 0;
  447. /*
  448. * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
  449. *
  450. * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
  451. * targets (server->nr_targets). It's also possible that the cached referral was cleared
  452. * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
  453. * refreshing the referral, so, in this case, default it to 1.
  454. */
  455. if (!dfs_cache_noreq_find(refpath, NULL, &tl))
  456. num_targets = dfs_cache_get_nr_tgts(&tl);
  457. if (!num_targets)
  458. num_targets = 1;
  459. if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
  460. return 0;
  461. /*
  462. * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
  463. * different server or share during failover. It could be improved by adding some logic to
  464. * only do that in case it connects to a different server or share, though.
  465. */
  466. cifs_mark_tcp_ses_conns_for_reconnect(server, true);
  467. cifs_abort_connection(server);
  468. do {
  469. try_to_freeze();
  470. cifs_server_lock(server);
  471. rc = reconnect_target_unlocked(server, &tl, &target_hint);
  472. if (rc) {
  473. /* Failed to reconnect socket */
  474. cifs_server_unlock(server);
  475. cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
  476. msleep(3000);
  477. continue;
  478. }
  479. /*
  480. * Socket was created. Update tcp session status to CifsNeedNegotiate so that a
  481. * process waiting for reconnect will know it needs to re-establish session and tcon
  482. * through the reconnected target server.
  483. */
  484. atomic_inc(&tcpSesReconnectCount);
  485. set_credits(server, 1);
  486. spin_lock(&server->srv_lock);
  487. if (server->tcpStatus != CifsExiting)
  488. server->tcpStatus = CifsNeedNegotiate;
  489. spin_unlock(&server->srv_lock);
  490. cifs_swn_reset_server_dstaddr(server);
  491. cifs_server_unlock(server);
  492. mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
  493. } while (server->tcpStatus == CifsNeedReconnect);
  494. if (target_hint)
  495. dfs_cache_noreq_update_tgthint(refpath, target_hint);
  496. dfs_cache_free_tgts(&tl);
  497. /* Need to set up echo worker again once connection has been established */
  498. spin_lock(&server->srv_lock);
  499. if (server->tcpStatus == CifsNeedNegotiate)
  500. mod_delayed_work(cifsiod_wq, &server->echo, 0);
  501. spin_unlock(&server->srv_lock);
  502. wake_up(&server->response_q);
  503. return rc;
  504. }
  505. int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
  506. {
  507. /* If tcp session is not an dfs connection, then reconnect to last target server */
  508. spin_lock(&server->srv_lock);
  509. if (!server->is_dfs_conn) {
  510. spin_unlock(&server->srv_lock);
  511. return __cifs_reconnect(server, mark_smb_session);
  512. }
  513. spin_unlock(&server->srv_lock);
  514. mutex_lock(&server->refpath_lock);
  515. if (!server->origin_fullpath || !server->leaf_fullpath) {
  516. mutex_unlock(&server->refpath_lock);
  517. return __cifs_reconnect(server, mark_smb_session);
  518. }
  519. mutex_unlock(&server->refpath_lock);
  520. return reconnect_dfs_server(server);
  521. }
  522. #else
  523. int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
  524. {
  525. return __cifs_reconnect(server, mark_smb_session);
  526. }
  527. #endif
  528. static void
  529. cifs_echo_request(struct work_struct *work)
  530. {
  531. int rc;
  532. struct TCP_Server_Info *server = container_of(work,
  533. struct TCP_Server_Info, echo.work);
  534. /*
  535. * We cannot send an echo if it is disabled.
  536. * Also, no need to ping if we got a response recently.
  537. */
  538. if (server->tcpStatus == CifsNeedReconnect ||
  539. server->tcpStatus == CifsExiting ||
  540. server->tcpStatus == CifsNew ||
  541. (server->ops->can_echo && !server->ops->can_echo(server)) ||
  542. time_before(jiffies, server->lstrp + server->echo_interval - HZ))
  543. goto requeue_echo;
  544. rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
  545. cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
  546. /* Check witness registrations */
  547. cifs_swn_check();
  548. requeue_echo:
  549. queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
  550. }
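/*
 * Make sure the demultiplex thread has both a large and a small receive
 * buffer, clearing the header portion of any buffer being reused from the
 * previous response. Returns false (after a short sleep) so the caller can
 * retry if allocation fails.
 */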
  551. static bool
  552. allocate_buffers(struct TCP_Server_Info *server)
  553. {
  554. if (!server->bigbuf) {
  555. server->bigbuf = (char *)cifs_buf_get();
  556. if (!server->bigbuf) {
  557. cifs_server_dbg(VFS, "No memory for large SMB response\n");
  558. msleep(3000);
  559. /* retry will check if exiting */
  560. return false;
  561. }
  562. } else if (server->large_buf) {
  563. /* we are reusing a dirty large buf, clear its start */
  564. memset(server->bigbuf, 0, HEADER_SIZE(server));
  565. }
  566. if (!server->smallbuf) {
  567. server->smallbuf = (char *)cifs_small_buf_get();
  568. if (!server->smallbuf) {
  569. cifs_server_dbg(VFS, "No memory for SMB response\n");
  570. msleep(1000);
  571. /* retry will check if exiting */
  572. return false;
  573. }
  574. /* beginning of smb buffer is cleared in our buf_get */
  575. } else {
  576. /* if existing small buf clear beginning */
  577. memset(server->smallbuf, 0, HEADER_SIZE(server));
  578. }
  579. return true;
  580. }
  581. static bool
  582. server_unresponsive(struct TCP_Server_Info *server)
  583. {
  584. /*
  585. * We need to wait 3 echo intervals to make sure we handle such
  586. * situations right:
  587. * 1s client sends a normal SMB request
  588. * 2s client gets a response
  589. * 30s echo workqueue job pops, and decides we got a response recently
  590. * and don't need to send another
  591. * ...
  592. * 65s kernel_recvmsg times out, and we see that we haven't gotten
  593. * a response in >60s.
  594. */
  595. spin_lock(&server->srv_lock);
  596. if ((server->tcpStatus == CifsGood ||
  597. server->tcpStatus == CifsNeedNegotiate) &&
  598. (!server->ops->can_echo || server->ops->can_echo(server)) &&
  599. time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
  600. spin_unlock(&server->srv_lock);
  601. cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
  602. (3 * server->echo_interval) / HZ);
  603. cifs_reconnect(server, false);
  604. return true;
  605. }
  606. spin_unlock(&server->srv_lock);
  607. return false;
  608. }
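/*
 * True when the server has granted no credits of any kind and there are no
 * requests in flight - the caller treats this as a stuck connection and
 * reconnects.
 */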
  609. static inline bool
  610. zero_credits(struct TCP_Server_Info *server)
  611. {
  612. int val;
  613. spin_lock(&server->req_lock);
  614. val = server->credits + server->echo_credits + server->oplock_credits;
  615. if (server->in_flight == 0 && val == 0) {
  616. spin_unlock(&server->req_lock);
  617. return true;
  618. }
  619. spin_unlock(&server->req_lock);
  620. return false;
  621. }
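/*
 * Core receive loop: pull data from the socket (or the smbdirect connection)
 * into @smb_msg, retrying on transient errors and reconnecting if the
 * session has died or the server stops responding. Returns the number of
 * bytes read or a negative error.
 */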
  622. static int
  623. cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
  624. {
  625. int length = 0;
  626. int total_read;
  627. for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
  628. try_to_freeze();
  629. /* reconnect if no credits and no requests in flight */
  630. if (zero_credits(server)) {
  631. cifs_reconnect(server, false);
  632. return -ECONNABORTED;
  633. }
  634. if (server_unresponsive(server))
  635. return -ECONNABORTED;
  636. if (cifs_rdma_enabled(server) && server->smbd_conn)
  637. length = smbd_recv(server->smbd_conn, smb_msg);
  638. else
  639. length = sock_recvmsg(server->ssocket, smb_msg, 0);
  640. spin_lock(&server->srv_lock);
  641. if (server->tcpStatus == CifsExiting) {
  642. spin_unlock(&server->srv_lock);
  643. return -ESHUTDOWN;
  644. }
  645. if (server->tcpStatus == CifsNeedReconnect) {
  646. spin_unlock(&server->srv_lock);
  647. cifs_reconnect(server, false);
  648. return -ECONNABORTED;
  649. }
  650. spin_unlock(&server->srv_lock);
  651. if (length == -ERESTARTSYS ||
  652. length == -EAGAIN ||
  653. length == -EINTR) {
  654. /*
  655. * Minimum sleep to prevent looping, allowing socket
  656. * to clear and app threads to set tcpStatus
  657. * CifsNeedReconnect if server hung.
  658. */
  659. usleep_range(1000, 2000);
  660. length = 0;
  661. continue;
  662. }
  663. if (length <= 0) {
  664. cifs_dbg(FYI, "Received no data or error: %d\n", length);
  665. cifs_reconnect(server, false);
  666. return -ECONNABORTED;
  667. }
  668. }
  669. return total_read;
  670. }
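/*
 * The next three helpers just build the appropriate iov_iter (kvec, discard
 * or bvec) around the caller's destination and hand it to
 * cifs_readv_from_socket().
 */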
  671. int
  672. cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
  673. unsigned int to_read)
  674. {
  675. struct msghdr smb_msg = {};
  676. struct kvec iov = {.iov_base = buf, .iov_len = to_read};
  677. iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
  678. return cifs_readv_from_socket(server, &smb_msg);
  679. }
  680. ssize_t
  681. cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
  682. {
  683. struct msghdr smb_msg = {};
  684. /*
  685. * iov_iter_discard already sets smb_msg.type and count and iov_offset
  686. * and cifs_readv_from_socket sets msg_control and msg_controllen
  687. * so little to initialize in struct msghdr
  688. */
  689. iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
  690. return cifs_readv_from_socket(server, &smb_msg);
  691. }
  692. int
  693. cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
  694. unsigned int page_offset, unsigned int to_read)
  695. {
  696. struct msghdr smb_msg = {};
  697. struct bio_vec bv = {
  698. .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
  699. iov_iter_bvec(&smb_msg.msg_iter, ITER_DEST, &bv, 1, to_read);
  700. return cifs_readv_from_socket(server, &smb_msg);
  701. }
  702. static bool
  703. is_smb_response(struct TCP_Server_Info *server, unsigned char type)
  704. {
  705. /*
  706. * The first byte big endian of the length field,
  707. * is actually not part of the length but the type
  708. * with the most common, zero, as regular data.
  709. */
  710. switch (type) {
  711. case RFC1002_SESSION_MESSAGE:
  712. /* Regular SMB response */
  713. return true;
  714. case RFC1002_SESSION_KEEP_ALIVE:
  715. cifs_dbg(FYI, "RFC 1002 session keep alive\n");
  716. break;
  717. case RFC1002_POSITIVE_SESSION_RESPONSE:
  718. cifs_dbg(FYI, "RFC 1002 positive session response\n");
  719. break;
  720. case RFC1002_NEGATIVE_SESSION_RESPONSE:
  721. /*
  722. * We get this from Windows 98 instead of an error on
  723. * SMB negprot response.
  724. */
  725. cifs_dbg(FYI, "RFC 1002 negative session response\n");
  726. /* give server a second to clean up */
  727. msleep(1000);
  728. /*
  729. * Always try 445 first on reconnect since we get NACK
  730. * on some if we ever connected to port 139 (the NACK
  731. * is since we do not begin with RFC1001 session
  732. * initialize frame).
  733. */
  734. cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
  735. cifs_reconnect(server, true);
  736. break;
  737. default:
  738. cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
  739. cifs_reconnect(server, true);
  740. }
  741. return false;
  742. }
  743. void
  744. dequeue_mid(struct mid_q_entry *mid, bool malformed)
  745. {
  746. #ifdef CONFIG_CIFS_STATS2
  747. mid->when_received = jiffies;
  748. #endif
  749. spin_lock(&mid->server->mid_lock);
  750. if (!malformed)
  751. mid->mid_state = MID_RESPONSE_RECEIVED;
  752. else
  753. mid->mid_state = MID_RESPONSE_MALFORMED;
  754. /*
  755. * Trying to handle/dequeue a mid after the send_recv()
  756. * function has finished processing it is a bug.
  757. */
  758. if (mid->mid_flags & MID_DELETED) {
  759. spin_unlock(&mid->server->mid_lock);
  760. pr_warn_once("trying to dequeue a deleted mid\n");
  761. } else {
  762. list_del_init(&mid->qhead);
  763. mid->mid_flags |= MID_DELETED;
  764. spin_unlock(&mid->server->mid_lock);
  765. }
  766. }
  767. static unsigned int
  768. smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
  769. {
  770. struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
  771. /*
  772. * SMB1 does not use credits.
  773. */
  774. if (is_smb1(server))
  775. return 0;
  776. return le16_to_cpu(shdr->CreditRequest);
  777. }
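/*
 * Attach the response buffer to its mid, record how many credits the server
 * granted in the header, and take the mid off the pending queue so that the
 * thread waiting in SendReceive can process the response.
 */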
  778. static void
  779. handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
  780. char *buf, int malformed)
  781. {
  782. if (server->ops->check_trans2 &&
  783. server->ops->check_trans2(mid, server, buf, malformed))
  784. return;
  785. mid->credits_received = smb2_get_credits_from_hdr(buf, server);
  786. mid->resp_buf = buf;
  787. mid->large_buf = server->large_buf;
  788. /* Was previous buf put in mpx struct for multi-rsp? */
  789. if (!mid->multiRsp) {
  790. /* smb buffer will be freed by user thread */
  791. if (server->large_buf)
  792. server->bigbuf = NULL;
  793. else
  794. server->smallbuf = NULL;
  795. }
  796. dequeue_mid(mid, malformed);
  797. }
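/*
 * Decide whether this connection must sign, honouring both what the server
 * advertised during negotiate and the client's mount options / global
 * security flags. Fails with -EOPNOTSUPP if one side requires signing and
 * the other does not allow it.
 */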
  798. int
  799. cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
  800. {
  801. bool srv_sign_required = server->sec_mode & server->vals->signing_required;
  802. bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
  803. bool mnt_sign_enabled;
  804. /*
  805. * Is signing required by mnt options? If not then check
  806. * global_secflags to see if it is there.
  807. */
  808. if (!mnt_sign_required)
  809. mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
  810. CIFSSEC_MUST_SIGN);
  811. /*
  812. * If signing is required then it's automatically enabled too,
  813. * otherwise, check to see if the secflags allow it.
  814. */
  815. mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
  816. (global_secflags & CIFSSEC_MAY_SIGN);
  817. /* If server requires signing, does client allow it? */
  818. if (srv_sign_required) {
  819. if (!mnt_sign_enabled) {
  820. cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
  821. return -EOPNOTSUPP;
  822. }
  823. server->sign = true;
  824. }
  825. /* If client requires signing, does server allow it? */
  826. if (mnt_sign_required) {
  827. if (!srv_sign_enabled) {
  828. cifs_dbg(VFS, "Server does not support signing!\n");
  829. return -EOPNOTSUPP;
  830. }
  831. server->sign = true;
  832. }
  833. if (cifs_rdma_enabled(server) && server->sign)
  834. cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
  835. return 0;
  836. }
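/*
 * Final teardown of a TCP_Server_Info as the demultiplex thread exits:
 * unhook it from the global tcp session list, cancel its workers, fail any
 * mids still pending, release the socket and finally free the structure.
 */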
  837. static void clean_demultiplex_info(struct TCP_Server_Info *server)
  838. {
  839. int length;
  840. /* take it off the list, if it's not already */
  841. spin_lock(&server->srv_lock);
  842. list_del_init(&server->tcp_ses_list);
  843. spin_unlock(&server->srv_lock);
  844. cancel_delayed_work_sync(&server->echo);
  845. cancel_delayed_work_sync(&server->resolve);
  846. spin_lock(&server->srv_lock);
  847. server->tcpStatus = CifsExiting;
  848. spin_unlock(&server->srv_lock);
  849. wake_up_all(&server->response_q);
  850. /* check if we have blocked requests that need to free */
  851. spin_lock(&server->req_lock);
  852. if (server->credits <= 0)
  853. server->credits = 1;
  854. spin_unlock(&server->req_lock);
  855. /*
  856. * Although there should not be any requests blocked on this queue it
  857. * can not hurt to be paranoid and try to wake up requests that may
  858. * haven been blocked when more than 50 at time were on the wire to the
  859. * same server - they now will see the session is in exit state and get
  860. * out of SendReceive.
  861. */
  862. wake_up_all(&server->request_q);
  863. /* give those requests time to exit */
  864. msleep(125);
  865. if (cifs_rdma_enabled(server))
  866. smbd_destroy(server);
  867. if (server->ssocket) {
  868. sock_release(server->ssocket);
  869. server->ssocket = NULL;
  870. }
  871. if (!list_empty(&server->pending_mid_q)) {
  872. struct list_head dispose_list;
  873. struct mid_q_entry *mid_entry;
  874. struct list_head *tmp, *tmp2;
  875. INIT_LIST_HEAD(&dispose_list);
  876. spin_lock(&server->mid_lock);
  877. list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
  878. mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
  879. cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
  880. kref_get(&mid_entry->refcount);
  881. mid_entry->mid_state = MID_SHUTDOWN;
  882. list_move(&mid_entry->qhead, &dispose_list);
  883. mid_entry->mid_flags |= MID_DELETED;
  884. }
  885. spin_unlock(&server->mid_lock);
  886. /* now walk dispose list and issue callbacks */
  887. list_for_each_safe(tmp, tmp2, &dispose_list) {
  888. mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
  889. cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
  890. list_del_init(&mid_entry->qhead);
  891. mid_entry->callback(mid_entry);
  892. release_mid(mid_entry);
  893. }
  894. /* 1/8th of sec is more than enough time for them to exit */
  895. msleep(125);
  896. }
  897. if (!list_empty(&server->pending_mid_q)) {
  898. /*
  899. * mpx threads have not exited yet give them at least the smb
  900. * send timeout time for long ops.
  901. *
  902. * Due to delays on oplock break requests, we need to wait at
  903. * least 45 seconds before giving up on a request getting a
  904. * response and going ahead and killing cifsd.
  905. */
  906. cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
  907. msleep(46000);
  908. /*
  909. * If threads still have not exited they are probably never
  910. * coming home not much else we can do but free the memory.
  911. */
  912. }
  913. #ifdef CONFIG_CIFS_DFS_UPCALL
  914. kfree(server->origin_fullpath);
  915. kfree(server->leaf_fullpath);
  916. #endif
  917. kfree(server);
  918. length = atomic_dec_return(&tcpSesAllocCount);
  919. if (length > 0)
  920. mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
  921. }
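/*
 * Read the remainder of a normal (untransformed) response into the small or
 * large buffer as appropriate, then pass it on to cifs_handle_standard().
 */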
  922. static int
  923. standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
  924. {
  925. int length;
  926. char *buf = server->smallbuf;
  927. unsigned int pdu_length = server->pdu_size;
  928. /* make sure this will fit in a large buffer */
  929. if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
  930. HEADER_PREAMBLE_SIZE(server)) {
  931. cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
  932. cifs_reconnect(server, true);
  933. return -ECONNABORTED;
  934. }
  935. /* switch to large buffer if too big for a small one */
  936. if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
  937. server->large_buf = true;
  938. memcpy(server->bigbuf, buf, server->total_read);
  939. buf = server->bigbuf;
  940. }
  941. /* now read the rest */
  942. length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
  943. pdu_length - MID_HEADER_SIZE(server));
  944. if (length < 0)
  945. return length;
  946. server->total_read += length;
  947. dump_smb(buf, server->total_read);
  948. return cifs_handle_standard(server, mid);
  949. }
  950. int
  951. cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
  952. {
  953. char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
  954. int rc;
  955. /*
  956. * We know that we received enough to get to the MID as we
  957. * checked the pdu_length earlier. Now check to see
  958. * if the rest of the header is OK.
  959. *
  960. * 48 bytes is enough to display the header and a little bit
  961. * into the payload for debugging purposes.
  962. */
  963. rc = server->ops->check_message(buf, server->total_read, server);
  964. if (rc)
  965. cifs_dump_mem("Bad SMB: ", buf,
  966. min_t(unsigned int, server->total_read, 48));
  967. if (server->ops->is_session_expired &&
  968. server->ops->is_session_expired(buf)) {
  969. cifs_reconnect(server, true);
  970. return -1;
  971. }
  972. if (server->ops->is_status_pending &&
  973. server->ops->is_status_pending(buf, server))
  974. return -1;
  975. if (!mid)
  976. return rc;
  977. handle_mid(mid, server, buf, rc);
  978. return 0;
  979. }
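/*
 * Credit whatever the server granted in a response header back to the
 * connection and wake up writers waiting for credits; used for frames that
 * no thread is waiting on, such as oplock breaks.
 */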
  980. static void
  981. smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
  982. {
  983. struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
  984. int scredits, in_flight;
  985. /*
  986. * SMB1 does not use credits.
  987. */
  988. if (is_smb1(server))
  989. return;
  990. if (shdr->CreditRequest) {
  991. spin_lock(&server->req_lock);
  992. server->credits += le16_to_cpu(shdr->CreditRequest);
  993. scredits = server->credits;
  994. in_flight = server->in_flight;
  995. spin_unlock(&server->req_lock);
  996. wake_up(&server->request_q);
  997. trace_smb3_hdr_credits(server->CurrentMid,
  998. server->conn_id, server->hostname, scredits,
  999. le16_to_cpu(shdr->CreditRequest), in_flight);
  1000. cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
  1001. __func__, le16_to_cpu(shdr->CreditRequest),
  1002. scredits);
  1003. }
  1004. }
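/*
 * Body of the cifsd kernel thread, one per TCP connection: read the RFC1002
 * length, receive each PDU (splitting compounded responses), match it to a
 * pending mid and run its callback, and reconnect on errors or repeated
 * STATUS_IO_TIMEOUT responses.
 */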
  1005. static int
  1006. cifs_demultiplex_thread(void *p)
  1007. {
  1008. int i, num_mids, length;
  1009. struct TCP_Server_Info *server = p;
  1010. unsigned int pdu_length;
  1011. unsigned int next_offset;
  1012. char *buf = NULL;
  1013. struct task_struct *task_to_wake = NULL;
  1014. struct mid_q_entry *mids[MAX_COMPOUND];
  1015. char *bufs[MAX_COMPOUND];
  1016. unsigned int noreclaim_flag, num_io_timeout = 0;
  1017. bool pending_reconnect = false;
  1018. noreclaim_flag = memalloc_noreclaim_save();
  1019. cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));
  1020. length = atomic_inc_return(&tcpSesAllocCount);
  1021. if (length > 1)
  1022. mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
  1023. set_freezable();
  1024. allow_kernel_signal(SIGKILL);
  1025. while (server->tcpStatus != CifsExiting) {
  1026. if (try_to_freeze())
  1027. continue;
  1028. if (!allocate_buffers(server))
  1029. continue;
  1030. server->large_buf = false;
  1031. buf = server->smallbuf;
  1032. pdu_length = 4; /* enough to get RFC1001 header */
  1033. length = cifs_read_from_socket(server, buf, pdu_length);
  1034. if (length < 0)
  1035. continue;
  1036. if (is_smb1(server))
  1037. server->total_read = length;
  1038. else
  1039. server->total_read = 0;
  1040. /*
1041. * The right amount (4 bytes) was read from the socket,
1042. * so we can now interpret the length field.
  1043. */
  1044. pdu_length = get_rfc1002_length(buf);
  1045. cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
  1046. if (!is_smb_response(server, buf[0]))
  1047. continue;
  1048. pending_reconnect = false;
  1049. next_pdu:
  1050. server->pdu_size = pdu_length;
  1051. /* make sure we have enough to get to the MID */
  1052. if (server->pdu_size < MID_HEADER_SIZE(server)) {
  1053. cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
  1054. server->pdu_size);
  1055. cifs_reconnect(server, true);
  1056. continue;
  1057. }
  1058. /* read down to the MID */
  1059. length = cifs_read_from_socket(server,
  1060. buf + HEADER_PREAMBLE_SIZE(server),
  1061. MID_HEADER_SIZE(server));
  1062. if (length < 0)
  1063. continue;
  1064. server->total_read += length;
  1065. if (server->ops->next_header) {
  1066. next_offset = server->ops->next_header(buf);
  1067. if (next_offset)
  1068. server->pdu_size = next_offset;
  1069. }
  1070. memset(mids, 0, sizeof(mids));
  1071. memset(bufs, 0, sizeof(bufs));
  1072. num_mids = 0;
  1073. if (server->ops->is_transform_hdr &&
  1074. server->ops->receive_transform &&
  1075. server->ops->is_transform_hdr(buf)) {
  1076. length = server->ops->receive_transform(server,
  1077. mids,
  1078. bufs,
  1079. &num_mids);
  1080. } else {
  1081. mids[0] = server->ops->find_mid(server, buf);
  1082. bufs[0] = buf;
  1083. num_mids = 1;
  1084. if (!mids[0] || !mids[0]->receive)
  1085. length = standard_receive3(server, mids[0]);
  1086. else
  1087. length = mids[0]->receive(server, mids[0]);
  1088. }
  1089. if (length < 0) {
  1090. for (i = 0; i < num_mids; i++)
  1091. if (mids[i])
  1092. release_mid(mids[i]);
  1093. continue;
  1094. }
  1095. if (server->ops->is_status_io_timeout &&
  1096. server->ops->is_status_io_timeout(buf)) {
  1097. num_io_timeout++;
  1098. if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
  1099. cifs_server_dbg(VFS,
  1100. "Number of request timeouts exceeded %d. Reconnecting",
  1101. MAX_STATUS_IO_TIMEOUT);
  1102. pending_reconnect = true;
  1103. num_io_timeout = 0;
  1104. }
  1105. }
  1106. server->lstrp = jiffies;
  1107. for (i = 0; i < num_mids; i++) {
  1108. if (mids[i] != NULL) {
  1109. mids[i]->resp_buf_size = server->pdu_size;
  1110. if (bufs[i] && server->ops->is_network_name_deleted)
  1111. server->ops->is_network_name_deleted(bufs[i],
  1112. server);
  1113. if (!mids[i]->multiRsp || mids[i]->multiEnd)
  1114. mids[i]->callback(mids[i]);
  1115. release_mid(mids[i]);
  1116. } else if (server->ops->is_oplock_break &&
  1117. server->ops->is_oplock_break(bufs[i],
  1118. server)) {
  1119. smb2_add_credits_from_hdr(bufs[i], server);
  1120. cifs_dbg(FYI, "Received oplock break\n");
  1121. } else {
  1122. cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
  1123. atomic_read(&mid_count));
  1124. cifs_dump_mem("Received Data is: ", bufs[i],
  1125. HEADER_SIZE(server));
  1126. smb2_add_credits_from_hdr(bufs[i], server);
  1127. #ifdef CONFIG_CIFS_DEBUG2
  1128. if (server->ops->dump_detail)
  1129. server->ops->dump_detail(bufs[i],
  1130. server);
  1131. cifs_dump_mids(server);
  1132. #endif /* CIFS_DEBUG2 */
  1133. }
  1134. }
  1135. if (pdu_length > server->pdu_size) {
  1136. if (!allocate_buffers(server))
  1137. continue;
  1138. pdu_length -= server->pdu_size;
  1139. server->total_read = 0;
  1140. server->large_buf = false;
  1141. buf = server->smallbuf;
  1142. goto next_pdu;
  1143. }
  1144. /* do this reconnect at the very end after processing all MIDs */
  1145. if (pending_reconnect)
  1146. cifs_reconnect(server, true);
  1147. } /* end while !EXITING */
  1148. /* buffer usually freed in free_mid - need to free it here on exit */
  1149. cifs_buf_release(server->bigbuf);
  1150. if (server->smallbuf) /* no sense logging a debug message if NULL */
  1151. cifs_small_buf_release(server->smallbuf);
  1152. task_to_wake = xchg(&server->tsk, NULL);
  1153. clean_demultiplex_info(server);
  1154. /* if server->tsk was NULL then wait for a signal before exiting */
  1155. if (!task_to_wake) {
  1156. set_current_state(TASK_INTERRUPTIBLE);
  1157. while (!signal_pending(current)) {
  1158. schedule();
  1159. set_current_state(TASK_INTERRUPTIBLE);
  1160. }
  1161. set_current_state(TASK_RUNNING);
  1162. }
  1163. memalloc_noreclaim_restore(noreclaim_flag);
  1164. module_put_and_kthread_exit(0);
  1165. }
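/*
 * memcmp-style three-way comparison of two socket addresses. Addresses of
 * different families compare unequal (ordered by family); addresses of the
 * same family are compared with memcmp(). Returns 0 when equal.
 */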
  1166. int
  1167. cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
  1168. {
  1169. struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
  1170. struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
  1171. struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
  1172. struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
  1173. switch (srcaddr->sa_family) {
  1174. case AF_UNSPEC:
  1175. switch (rhs->sa_family) {
  1176. case AF_UNSPEC:
  1177. return 0;
  1178. case AF_INET:
  1179. case AF_INET6:
  1180. return 1;
  1181. default:
  1182. return -1;
  1183. }
  1184. case AF_INET: {
  1185. switch (rhs->sa_family) {
  1186. case AF_UNSPEC:
  1187. return -1;
  1188. case AF_INET:
  1189. return memcmp(saddr4, vaddr4,
  1190. sizeof(struct sockaddr_in));
  1191. case AF_INET6:
  1192. return 1;
  1193. default:
  1194. return -1;
  1195. }
  1196. }
  1197. case AF_INET6: {
  1198. switch (rhs->sa_family) {
  1199. case AF_UNSPEC:
  1200. case AF_INET:
  1201. return -1;
  1202. case AF_INET6:
  1203. return memcmp(saddr6,
  1204. vaddr6,
  1205. sizeof(struct sockaddr_in6));
  1206. default:
  1207. return -1;
  1208. }
  1209. }
  1210. default:
  1211. return -1; /* don't expect to be here */
  1212. }
  1213. }
  1214. /*
  1215. * Returns true if srcaddr isn't specified and rhs isn't specified, or
  1216. * if srcaddr is specified and matches the IP address of the rhs argument
  1217. */
  1218. bool
  1219. cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
  1220. {
  1221. switch (srcaddr->sa_family) {
  1222. case AF_UNSPEC:
  1223. return (rhs->sa_family == AF_UNSPEC);
  1224. case AF_INET: {
  1225. struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
  1226. struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
  1227. return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
  1228. }
  1229. case AF_INET6: {
  1230. struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
  1231. struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
  1232. return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
  1233. }
  1234. default:
  1235. WARN_ON(1);
  1236. return false; /* don't expect to be here */
  1237. }
  1238. }
  1239. /*
1240. * If no port is specified in the addr structure, we try to match with port 445,
1241. * and if that fails, with port 139. This should be called only if the address
1242. * families of server and addr are equal.
  1243. */
  1244. static bool
  1245. match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
  1246. {
  1247. __be16 port, *sport;
  1248. /* SMBDirect manages its own ports, don't match it here */
  1249. if (server->rdma)
  1250. return true;
  1251. switch (addr->sa_family) {
  1252. case AF_INET:
  1253. sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
  1254. port = ((struct sockaddr_in *) addr)->sin_port;
  1255. break;
  1256. case AF_INET6:
  1257. sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
  1258. port = ((struct sockaddr_in6 *) addr)->sin6_port;
  1259. break;
  1260. default:
  1261. WARN_ON(1);
  1262. return false;
  1263. }
  1264. if (!port) {
  1265. port = htons(CIFS_PORT);
  1266. if (port == *sport)
  1267. return true;
  1268. port = htons(RFC1001_PORT);
  1269. }
  1270. return port == *sport;
  1271. }
  1272. static bool
  1273. match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
  1274. struct sockaddr *srcaddr)
  1275. {
  1276. switch (addr->sa_family) {
  1277. case AF_INET: {
  1278. struct sockaddr_in *addr4 = (struct sockaddr_in *)addr;
  1279. struct sockaddr_in *srv_addr4 =
  1280. (struct sockaddr_in *)&server->dstaddr;
  1281. if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr)
  1282. return false;
  1283. break;
  1284. }
  1285. case AF_INET6: {
  1286. struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr;
  1287. struct sockaddr_in6 *srv_addr6 =
  1288. (struct sockaddr_in6 *)&server->dstaddr;
  1289. if (!ipv6_addr_equal(&addr6->sin6_addr,
  1290. &srv_addr6->sin6_addr))
  1291. return false;
  1292. if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id)
  1293. return false;
  1294. break;
  1295. }
  1296. default:
  1297. WARN_ON(1);
  1298. return false; /* don't expect to be here */
  1299. }
  1300. if (!cifs_match_ipaddr(srcaddr, (struct sockaddr *)&server->srcaddr))
  1301. return false;
  1302. return true;
  1303. }
  1304. static bool
  1305. match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
  1306. {
  1307. /*
  1308. * The select_sectype function should either return the ctx->sectype
  1309. * that was specified, or "Unspecified" if that sectype was not
  1310. * compatible with the given NEGOTIATE request.
  1311. */
  1312. if (server->ops->select_sectype(server, ctx->sectype)
  1313. == Unspecified)
  1314. return false;
  1315. /*
  1316. * Now check if signing mode is acceptable. No need to check
  1317. * global_secflags at this point since if MUST_SIGN is set then
  1318. * the server->sign had better be too.
  1319. */
  1320. if (ctx->sign && !server->sign)
  1321. return false;
  1322. return true;
  1323. }
  1324. /* this function must be called with srv_lock held */
  1325. static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
  1326. {
  1327. struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
  1328. lockdep_assert_held(&server->srv_lock);
  1329. if (ctx->nosharesock)
  1330. return 0;
  1331. /* this server does not share socket */
  1332. if (server->nosharesock)
  1333. return 0;
  1334. /* If multidialect negotiation see if existing sessions match one */
  1335. if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
  1336. if (server->vals->protocol_id < SMB30_PROT_ID)
  1337. return 0;
  1338. } else if (strcmp(ctx->vals->version_string,
  1339. SMBDEFAULT_VERSION_STRING) == 0) {
  1340. if (server->vals->protocol_id < SMB21_PROT_ID)
  1341. return 0;
  1342. } else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
  1343. return 0;
  1344. if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
  1345. return 0;
  1346. if (strcasecmp(server->hostname, ctx->server_hostname))
  1347. return 0;
  1348. if (!match_address(server, addr,
  1349. (struct sockaddr *)&ctx->srcaddr))
  1350. return 0;
  1351. if (!match_port(server, addr))
  1352. return 0;
  1353. if (!match_security(server, ctx))
  1354. return 0;
  1355. if (server->echo_interval != ctx->echo_interval * HZ)
  1356. return 0;
  1357. if (server->rdma != ctx->rdma)
  1358. return 0;
  1359. if (server->ignore_signature != ctx->ignore_signature)
  1360. return 0;
  1361. if (server->min_offload != ctx->min_offload)
  1362. return 0;
  1363. return 1;
  1364. }
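/*
 * Look for an existing TCP connection that this mount request can share.
 * Returns the server with srv_count bumped, or NULL if none matches.
 */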
  1365. struct TCP_Server_Info *
  1366. cifs_find_tcp_session(struct smb3_fs_context *ctx)
  1367. {
  1368. struct TCP_Server_Info *server;
  1369. spin_lock(&cifs_tcp_ses_lock);
  1370. list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
  1371. spin_lock(&server->srv_lock);
  1372. #ifdef CONFIG_CIFS_DFS_UPCALL
  1373. /*
  1374. * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for
  1375. * DFS connections to do failover properly, so avoid sharing them with regular
1376. * shares or even links that may connect to the same server but have completely
1377. * different failover targets.
  1378. */
  1379. if (server->is_dfs_conn) {
  1380. spin_unlock(&server->srv_lock);
  1381. continue;
  1382. }
  1383. #endif
  1384. /*
  1385. * Skip ses channels since they're only handled in lower layers
  1386. * (e.g. cifs_send_recv).
  1387. */
  1388. if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) {
  1389. spin_unlock(&server->srv_lock);
  1390. continue;
  1391. }
  1392. spin_unlock(&server->srv_lock);
  1393. ++server->srv_count;
  1394. spin_unlock(&cifs_tcp_ses_lock);
  1395. cifs_dbg(FYI, "Existing tcp session with server found\n");
  1396. return server;
  1397. }
  1398. spin_unlock(&cifs_tcp_ses_lock);
  1399. return NULL;
  1400. }
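/*
 * Drop a reference to a TCP connection. On the last put the server is taken
 * off the global list, its delayed workers (echo, resolve, reconnect) are
 * cancelled, key material is freed and the demultiplex thread is signalled
 * to exit.
 */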
  1401. void
  1402. cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
  1403. {
  1404. struct task_struct *task;
  1405. spin_lock(&cifs_tcp_ses_lock);
  1406. if (--server->srv_count > 0) {
  1407. spin_unlock(&cifs_tcp_ses_lock);
  1408. return;
  1409. }
  1410. /* srv_count can never go negative */
  1411. WARN_ON(server->srv_count < 0);
  1412. put_net(cifs_net_ns(server));
  1413. list_del_init(&server->tcp_ses_list);
  1414. spin_unlock(&cifs_tcp_ses_lock);
  1415. /* For secondary channels, we pick up ref-count on the primary server */
  1416. if (CIFS_SERVER_IS_CHAN(server))
  1417. cifs_put_tcp_session(server->primary_server, from_reconnect);
  1418. cancel_delayed_work_sync(&server->echo);
  1419. cancel_delayed_work_sync(&server->resolve);
  1420. if (from_reconnect)
  1421. /*
  1422. * Avoid deadlock here: reconnect work calls
  1423. * cifs_put_tcp_session() at its end. Need to be sure
  1424. * that reconnect work does nothing with server pointer after
  1425. * that step.
  1426. */
  1427. cancel_delayed_work(&server->reconnect);
  1428. else
  1429. cancel_delayed_work_sync(&server->reconnect);
  1430. spin_lock(&server->srv_lock);
  1431. server->tcpStatus = CifsExiting;
  1432. spin_unlock(&server->srv_lock);
  1433. cifs_crypto_secmech_release(server);
  1434. kfree_sensitive(server->session_key.response);
  1435. server->session_key.response = NULL;
  1436. server->session_key.len = 0;
  1437. kfree(server->hostname);
  1438. server->hostname = NULL;
  1439. task = xchg(&server->tsk, NULL);
  1440. if (task)
  1441. send_sig(SIGKILL, task, 1);
  1442. }
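/*
 * Find or create a TCP connection matching @ctx. A new server gets its
 * socket (or SMBDirect connection) established and a "cifsd" demultiplex
 * thread spawned before being added to the global list and returned with
 * an initial reference.
 */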
  1443. struct TCP_Server_Info *
  1444. cifs_get_tcp_session(struct smb3_fs_context *ctx,
  1445. struct TCP_Server_Info *primary_server)
  1446. {
  1447. struct TCP_Server_Info *tcp_ses = NULL;
  1448. int rc;
  1449. cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);
  1450. /* see if we already have a matching tcp_ses */
  1451. tcp_ses = cifs_find_tcp_session(ctx);
  1452. if (tcp_ses)
  1453. return tcp_ses;
  1454. tcp_ses = kzalloc(sizeof(struct TCP_Server_Info), GFP_KERNEL);
  1455. if (!tcp_ses) {
  1456. rc = -ENOMEM;
  1457. goto out_err;
  1458. }
  1459. tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
  1460. if (!tcp_ses->hostname) {
  1461. rc = -ENOMEM;
  1462. goto out_err;
  1463. }
  1464. if (ctx->nosharesock)
  1465. tcp_ses->nosharesock = true;
  1466. tcp_ses->ops = ctx->ops;
  1467. tcp_ses->vals = ctx->vals;
  1468. cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
  1469. tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
  1470. tcp_ses->noblockcnt = ctx->rootfs;
  1471. tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
  1472. tcp_ses->noautotune = ctx->noautotune;
  1473. tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
  1474. tcp_ses->rdma = ctx->rdma;
  1475. tcp_ses->in_flight = 0;
  1476. tcp_ses->max_in_flight = 0;
  1477. tcp_ses->credits = 1;
  1478. if (primary_server) {
  1479. spin_lock(&cifs_tcp_ses_lock);
  1480. ++primary_server->srv_count;
  1481. spin_unlock(&cifs_tcp_ses_lock);
  1482. tcp_ses->primary_server = primary_server;
  1483. }
  1484. init_waitqueue_head(&tcp_ses->response_q);
  1485. init_waitqueue_head(&tcp_ses->request_q);
  1486. INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
  1487. mutex_init(&tcp_ses->_srv_mutex);
  1488. memcpy(tcp_ses->workstation_RFC1001_name,
  1489. ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
  1490. memcpy(tcp_ses->server_RFC1001_name,
  1491. ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
  1492. tcp_ses->session_estab = false;
  1493. tcp_ses->sequence_number = 0;
  1494. tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
  1495. tcp_ses->reconnect_instance = 1;
  1496. tcp_ses->lstrp = jiffies;
  1497. tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression);
  1498. spin_lock_init(&tcp_ses->req_lock);
  1499. spin_lock_init(&tcp_ses->srv_lock);
  1500. spin_lock_init(&tcp_ses->mid_lock);
  1501. INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
  1502. INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
  1503. INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
  1504. INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
  1505. INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
  1506. mutex_init(&tcp_ses->reconnect_mutex);
  1507. #ifdef CONFIG_CIFS_DFS_UPCALL
  1508. mutex_init(&tcp_ses->refpath_lock);
  1509. #endif
  1510. memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
  1511. sizeof(tcp_ses->srcaddr));
  1512. memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
  1513. sizeof(tcp_ses->dstaddr));
  1514. if (ctx->use_client_guid)
  1515. memcpy(tcp_ses->client_guid, ctx->client_guid,
  1516. SMB2_CLIENT_GUID_SIZE);
  1517. else
  1518. generate_random_uuid(tcp_ses->client_guid);
  1519. /*
1520. * at this point we are the only ones with a pointer to
1521. * the struct since the kernel thread has not been created yet,
1522. * so there is no need to spinlock this init of tcpStatus or srv_count
  1523. */
  1524. tcp_ses->tcpStatus = CifsNew;
  1525. ++tcp_ses->srv_count;
  1526. if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
  1527. ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
  1528. tcp_ses->echo_interval = ctx->echo_interval * HZ;
  1529. else
  1530. tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
  1531. if (tcp_ses->rdma) {
  1532. #ifndef CONFIG_CIFS_SMB_DIRECT
  1533. cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
  1534. rc = -ENOENT;
  1535. goto out_err_crypto_release;
  1536. #endif
  1537. tcp_ses->smbd_conn = smbd_get_connection(
  1538. tcp_ses, (struct sockaddr *)&ctx->dstaddr);
  1539. if (tcp_ses->smbd_conn) {
  1540. cifs_dbg(VFS, "RDMA transport established\n");
  1541. rc = 0;
  1542. goto smbd_connected;
  1543. } else {
  1544. rc = -ENOENT;
  1545. goto out_err_crypto_release;
  1546. }
  1547. }
  1548. rc = ip_connect(tcp_ses);
  1549. if (rc < 0) {
  1550. cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
  1551. goto out_err_crypto_release;
  1552. }
  1553. smbd_connected:
  1554. /*
  1555. * since we're in a cifs function already, we know that
  1556. * this will succeed. No need for try_module_get().
  1557. */
  1558. __module_get(THIS_MODULE);
  1559. tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
  1560. tcp_ses, "cifsd");
  1561. if (IS_ERR(tcp_ses->tsk)) {
  1562. rc = PTR_ERR(tcp_ses->tsk);
  1563. cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
  1564. module_put(THIS_MODULE);
  1565. goto out_err_crypto_release;
  1566. }
  1567. tcp_ses->min_offload = ctx->min_offload;
  1568. /*
1569. * the demultiplex thread is already running at this point, so
1570. * take srv_lock for this update of tcpStatus
  1572. */
  1573. spin_lock(&tcp_ses->srv_lock);
  1574. tcp_ses->tcpStatus = CifsNeedNegotiate;
  1575. spin_unlock(&tcp_ses->srv_lock);
  1576. if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
  1577. tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
  1578. else
  1579. tcp_ses->max_credits = ctx->max_credits;
  1580. tcp_ses->nr_targets = 1;
  1581. tcp_ses->ignore_signature = ctx->ignore_signature;
  1582. /* thread spawned, put it on the list */
  1583. spin_lock(&cifs_tcp_ses_lock);
  1584. list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
  1585. spin_unlock(&cifs_tcp_ses_lock);
  1586. /* queue echo request delayed work */
  1587. queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
  1588. /* queue dns resolution delayed work */
  1589. cifs_dbg(FYI, "%s: next dns resolution scheduled for %d seconds in the future\n",
  1590. __func__, SMB_DNS_RESOLVE_INTERVAL_DEFAULT);
  1591. queue_delayed_work(cifsiod_wq, &tcp_ses->resolve, (SMB_DNS_RESOLVE_INTERVAL_DEFAULT * HZ));
  1592. return tcp_ses;
  1593. out_err_crypto_release:
  1594. cifs_crypto_secmech_release(tcp_ses);
  1595. put_net(cifs_net_ns(tcp_ses));
  1596. out_err:
  1597. if (tcp_ses) {
  1598. if (CIFS_SERVER_IS_CHAN(tcp_ses))
  1599. cifs_put_tcp_session(tcp_ses->primary_server, false);
  1600. kfree(tcp_ses->hostname);
  1601. if (tcp_ses->ssocket)
  1602. sock_release(tcp_ses->ssocket);
  1603. kfree(tcp_ses);
  1604. }
  1605. return ERR_PTR(rc);
  1606. }
  1607. /* this function must be called with ses_lock and chan_lock held */
  1608. static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
  1609. {
  1610. if (ctx->sectype != Unspecified &&
  1611. ctx->sectype != ses->sectype)
  1612. return 0;
  1613. /*
1614. * If an existing session is limited to fewer channels than
1615. * requested, it should not be reused.
  1616. */
  1617. if (ses->chan_max < ctx->max_channels)
  1618. return 0;
  1619. switch (ses->sectype) {
  1620. case Kerberos:
  1621. if (!uid_eq(ctx->cred_uid, ses->cred_uid))
  1622. return 0;
  1623. break;
  1624. default:
  1625. /* NULL username means anonymous session */
  1626. if (ses->user_name == NULL) {
  1627. if (!ctx->nullauth)
  1628. return 0;
  1629. break;
  1630. }
  1631. /* anything else takes username/password */
  1632. if (strncmp(ses->user_name,
  1633. ctx->username ? ctx->username : "",
  1634. CIFS_MAX_USERNAME_LEN))
  1635. return 0;
  1636. if ((ctx->username && strlen(ctx->username) != 0) &&
  1637. ses->password != NULL &&
  1638. strncmp(ses->password,
  1639. ctx->password ? ctx->password : "",
  1640. CIFS_MAX_PASSWORD_LEN))
  1641. return 0;
  1642. }
  1643. return 1;
  1644. }
  1645. /**
  1646. * cifs_setup_ipc - helper to setup the IPC tcon for the session
  1647. * @ses: smb session to issue the request on
  1648. * @ctx: the superblock configuration context to use for building the
  1649. * new tree connection for the IPC (interprocess communication RPC)
  1650. *
  1651. * A new IPC connection is made and stored in the session
  1652. * tcon_ipc. The IPC tcon has the same lifetime as the session.
  1653. */
  1654. static int
  1655. cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
  1656. {
  1657. int rc = 0, xid;
  1658. struct cifs_tcon *tcon;
  1659. char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
  1660. bool seal = false;
  1661. struct TCP_Server_Info *server = ses->server;
  1662. /*
  1663. * If the mount request that resulted in the creation of the
  1664. * session requires encryption, force IPC to be encrypted too.
  1665. */
  1666. if (ctx->seal) {
  1667. if (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)
  1668. seal = true;
  1669. else {
  1670. cifs_server_dbg(VFS,
  1671. "IPC: server doesn't support encryption\n");
  1672. return -EOPNOTSUPP;
  1673. }
  1674. }
  1675. tcon = tconInfoAlloc();
  1676. if (tcon == NULL)
  1677. return -ENOMEM;
  1678. spin_lock(&server->srv_lock);
  1679. scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
  1680. spin_unlock(&server->srv_lock);
  1681. xid = get_xid();
  1682. tcon->ses = ses;
  1683. tcon->ipc = true;
  1684. tcon->seal = seal;
  1685. rc = server->ops->tree_connect(xid, ses, unc, tcon, ctx->local_nls);
  1686. free_xid(xid);
  1687. if (rc) {
  1688. cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
  1689. tconInfoFree(tcon);
  1690. goto out;
  1691. }
  1692. cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);
  1693. spin_lock(&tcon->tc_lock);
  1694. tcon->status = TID_GOOD;
  1695. spin_unlock(&tcon->tc_lock);
  1696. ses->tcon_ipc = tcon;
  1697. out:
  1698. return rc;
  1699. }
  1700. /**
  1701. * cifs_free_ipc - helper to release the session IPC tcon
  1702. * @ses: smb session to unmount the IPC from
  1703. *
1704. * Needs to be called every time a session is destroyed.
  1705. *
  1706. * On session close, the IPC is closed and the server must release all tcons of the session.
  1707. * No need to send a tree disconnect here.
  1708. *
1709. * Besides, it prevents the server from closing durable and resilient files on session close, as
  1710. * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request.
  1711. */
  1712. static int
  1713. cifs_free_ipc(struct cifs_ses *ses)
  1714. {
  1715. struct cifs_tcon *tcon = ses->tcon_ipc;
  1716. if (tcon == NULL)
  1717. return 0;
  1718. tconInfoFree(tcon);
  1719. ses->tcon_ipc = NULL;
  1720. return 0;
  1721. }
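/*
 * Return an existing, non-exiting SMB session on @server that matches the
 * mount context, with its refcount bumped, or NULL if none is found.
 */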
  1722. static struct cifs_ses *
  1723. cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
  1724. {
  1725. struct cifs_ses *ses;
  1726. spin_lock(&cifs_tcp_ses_lock);
  1727. list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
  1728. spin_lock(&ses->ses_lock);
  1729. if (ses->ses_status == SES_EXITING) {
  1730. spin_unlock(&ses->ses_lock);
  1731. continue;
  1732. }
  1733. spin_lock(&ses->chan_lock);
  1734. if (!match_session(ses, ctx)) {
  1735. spin_unlock(&ses->chan_lock);
  1736. spin_unlock(&ses->ses_lock);
  1737. continue;
  1738. }
  1739. spin_unlock(&ses->chan_lock);
  1740. spin_unlock(&ses->ses_lock);
  1741. ++ses->ses_count;
  1742. spin_unlock(&cifs_tcp_ses_lock);
  1743. return ses;
  1744. }
  1745. spin_unlock(&cifs_tcp_ses_lock);
  1746. return NULL;
  1747. }
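/*
 * Drop a reference to an SMB session. On the last put the session is logged
 * off, its IPC tcon and extra channels are released, and the TCP connection
 * references it held are dropped.
 */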
  1748. void cifs_put_smb_ses(struct cifs_ses *ses)
  1749. {
  1750. unsigned int rc, xid;
  1751. unsigned int chan_count;
  1752. struct TCP_Server_Info *server = ses->server;
  1753. spin_lock(&ses->ses_lock);
  1754. if (ses->ses_status == SES_EXITING) {
  1755. spin_unlock(&ses->ses_lock);
  1756. return;
  1757. }
  1758. spin_unlock(&ses->ses_lock);
  1759. cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
  1760. cifs_dbg(FYI,
  1761. "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE");
  1762. spin_lock(&cifs_tcp_ses_lock);
  1763. if (--ses->ses_count > 0) {
  1764. spin_unlock(&cifs_tcp_ses_lock);
  1765. return;
  1766. }
  1767. spin_unlock(&cifs_tcp_ses_lock);
  1768. /* ses_count can never go negative */
  1769. WARN_ON(ses->ses_count < 0);
  1770. if (ses->ses_status == SES_GOOD)
  1771. ses->ses_status = SES_EXITING;
  1772. cifs_free_ipc(ses);
  1773. if (ses->ses_status == SES_EXITING && server->ops->logoff) {
  1774. xid = get_xid();
  1775. rc = server->ops->logoff(xid, ses);
  1776. if (rc)
  1777. cifs_server_dbg(VFS, "%s: Session Logoff failure rc=%d\n",
  1778. __func__, rc);
  1779. _free_xid(xid);
  1780. }
  1781. spin_lock(&cifs_tcp_ses_lock);
  1782. list_del_init(&ses->smb_ses_list);
  1783. spin_unlock(&cifs_tcp_ses_lock);
  1784. chan_count = ses->chan_count;
  1785. /* close any extra channels */
  1786. if (chan_count > 1) {
  1787. int i;
  1788. for (i = 1; i < chan_count; i++) {
  1789. if (ses->chans[i].iface) {
  1790. kref_put(&ses->chans[i].iface->refcount, release_iface);
  1791. ses->chans[i].iface = NULL;
  1792. }
  1793. cifs_put_tcp_session(ses->chans[i].server, 0);
  1794. ses->chans[i].server = NULL;
  1795. }
  1796. }
  1797. /* we now account for primary channel in iface->refcount */
  1798. if (ses->chans[0].iface) {
  1799. kref_put(&ses->chans[0].iface->refcount, release_iface);
  1800. ses->chans[0].server = NULL;
  1801. }
  1802. sesInfoFree(ses);
  1803. cifs_put_tcp_session(server, 0);
  1804. }
  1805. #ifdef CONFIG_KEYS
  1806. /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
  1807. #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
  1808. /* Populate username and pw fields from keyring if possible */
  1809. static int
  1810. cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
  1811. {
  1812. int rc = 0;
  1813. int is_domain = 0;
  1814. const char *delim, *payload;
  1815. char *desc;
  1816. ssize_t len;
  1817. struct key *key;
  1818. struct TCP_Server_Info *server = ses->server;
  1819. struct sockaddr_in *sa;
  1820. struct sockaddr_in6 *sa6;
  1821. const struct user_key_payload *upayload;
  1822. desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
  1823. if (!desc)
  1824. return -ENOMEM;
  1825. /* try to find an address key first */
  1826. switch (server->dstaddr.ss_family) {
  1827. case AF_INET:
  1828. sa = (struct sockaddr_in *)&server->dstaddr;
  1829. sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
  1830. break;
  1831. case AF_INET6:
  1832. sa6 = (struct sockaddr_in6 *)&server->dstaddr;
  1833. sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
  1834. break;
  1835. default:
  1836. cifs_dbg(FYI, "Bad ss_family (%hu)\n",
  1837. server->dstaddr.ss_family);
  1838. rc = -EINVAL;
  1839. goto out_err;
  1840. }
  1841. cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
  1842. key = request_key(&key_type_logon, desc, "");
  1843. if (IS_ERR(key)) {
  1844. if (!ses->domainName) {
  1845. cifs_dbg(FYI, "domainName is NULL\n");
  1846. rc = PTR_ERR(key);
  1847. goto out_err;
  1848. }
  1849. /* didn't work, try to find a domain key */
  1850. sprintf(desc, "cifs:d:%s", ses->domainName);
  1851. cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
  1852. key = request_key(&key_type_logon, desc, "");
  1853. if (IS_ERR(key)) {
  1854. rc = PTR_ERR(key);
  1855. goto out_err;
  1856. }
  1857. is_domain = 1;
  1858. }
  1859. down_read(&key->sem);
  1860. upayload = user_key_payload_locked(key);
  1861. if (IS_ERR_OR_NULL(upayload)) {
  1862. rc = upayload ? PTR_ERR(upayload) : -EINVAL;
  1863. goto out_key_put;
  1864. }
  1865. /* find first : in payload */
  1866. payload = upayload->data;
  1867. delim = strnchr(payload, upayload->datalen, ':');
  1868. cifs_dbg(FYI, "payload=%s\n", payload);
  1869. if (!delim) {
  1870. cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
  1871. upayload->datalen);
  1872. rc = -EINVAL;
  1873. goto out_key_put;
  1874. }
  1875. len = delim - payload;
  1876. if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
  1877. cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
  1878. len);
  1879. rc = -EINVAL;
  1880. goto out_key_put;
  1881. }
  1882. ctx->username = kstrndup(payload, len, GFP_KERNEL);
  1883. if (!ctx->username) {
  1884. cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
  1885. len);
  1886. rc = -ENOMEM;
  1887. goto out_key_put;
  1888. }
  1889. cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);
  1890. len = key->datalen - (len + 1);
  1891. if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
  1892. cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
  1893. rc = -EINVAL;
  1894. kfree(ctx->username);
  1895. ctx->username = NULL;
  1896. goto out_key_put;
  1897. }
  1898. ++delim;
  1899. ctx->password = kstrndup(delim, len, GFP_KERNEL);
  1900. if (!ctx->password) {
  1901. cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
  1902. len);
  1903. rc = -ENOMEM;
  1904. kfree(ctx->username);
  1905. ctx->username = NULL;
  1906. goto out_key_put;
  1907. }
  1908. /*
1909. * If we have a domain key then we must set the domainName in the
1910. * fs context for the request.
  1911. */
  1912. if (is_domain && ses->domainName) {
  1913. ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
  1914. if (!ctx->domainname) {
  1915. cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
  1916. len);
  1917. rc = -ENOMEM;
  1918. kfree(ctx->username);
  1919. ctx->username = NULL;
  1920. kfree_sensitive(ctx->password);
  1921. ctx->password = NULL;
  1922. goto out_key_put;
  1923. }
  1924. }
  1925. strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));
  1926. out_key_put:
  1927. up_read(&key->sem);
  1928. key_put(key);
  1929. out_err:
  1930. kfree(desc);
  1931. cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
  1932. return rc;
  1933. }
  1934. #else /* ! CONFIG_KEYS */
  1935. static inline int
  1936. cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
  1937. struct cifs_ses *ses __attribute__((unused)))
  1938. {
  1939. return -ENOSYS;
  1940. }
  1941. #endif /* CONFIG_KEYS */
  1942. /**
  1943. * cifs_get_smb_ses - get a session matching @ctx data from @server
  1944. * @server: server to setup the session to
  1945. * @ctx: superblock configuration context to use to setup the session
  1946. *
  1947. * This function assumes it is being called from cifs_mount() where we
  1948. * already got a server reference (server refcount +1). See
  1949. * cifs_get_tcon() for refcount explanations.
  1950. */
  1951. struct cifs_ses *
  1952. cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
  1953. {
  1954. int rc = 0;
  1955. unsigned int xid;
  1956. struct cifs_ses *ses;
  1957. struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
  1958. struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
  1959. xid = get_xid();
  1960. ses = cifs_find_smb_ses(server, ctx);
  1961. if (ses) {
  1962. cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
  1963. ses->ses_status);
  1964. spin_lock(&ses->chan_lock);
  1965. if (cifs_chan_needs_reconnect(ses, server)) {
  1966. spin_unlock(&ses->chan_lock);
  1967. cifs_dbg(FYI, "Session needs reconnect\n");
  1968. mutex_lock(&ses->session_mutex);
  1969. rc = cifs_negotiate_protocol(xid, ses, server);
  1970. if (rc) {
  1971. mutex_unlock(&ses->session_mutex);
  1972. /* problem -- put our ses reference */
  1973. cifs_put_smb_ses(ses);
  1974. free_xid(xid);
  1975. return ERR_PTR(rc);
  1976. }
  1977. rc = cifs_setup_session(xid, ses, server,
  1978. ctx->local_nls);
  1979. if (rc) {
  1980. mutex_unlock(&ses->session_mutex);
  1981. /* problem -- put our reference */
  1982. cifs_put_smb_ses(ses);
  1983. free_xid(xid);
  1984. return ERR_PTR(rc);
  1985. }
  1986. mutex_unlock(&ses->session_mutex);
  1987. spin_lock(&ses->chan_lock);
  1988. }
  1989. spin_unlock(&ses->chan_lock);
  1990. /* existing SMB ses has a server reference already */
  1991. cifs_put_tcp_session(server, 0);
  1992. free_xid(xid);
  1993. return ses;
  1994. }
  1995. rc = -ENOMEM;
  1996. cifs_dbg(FYI, "Existing smb sess not found\n");
  1997. ses = sesInfoAlloc();
  1998. if (ses == NULL)
  1999. goto get_ses_fail;
  2000. /* new SMB session uses our server ref */
  2001. ses->server = server;
  2002. if (server->dstaddr.ss_family == AF_INET6)
  2003. sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
  2004. else
  2005. sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);
  2006. if (ctx->username) {
  2007. ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
  2008. if (!ses->user_name)
  2009. goto get_ses_fail;
  2010. }
  2011. /* ctx->password freed at unmount */
  2012. if (ctx->password) {
  2013. ses->password = kstrdup(ctx->password, GFP_KERNEL);
  2014. if (!ses->password)
  2015. goto get_ses_fail;
  2016. }
  2017. if (ctx->domainname) {
  2018. ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
  2019. if (!ses->domainName)
  2020. goto get_ses_fail;
  2021. }
  2022. strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));
  2023. if (ctx->domainauto)
  2024. ses->domainAuto = ctx->domainauto;
  2025. ses->cred_uid = ctx->cred_uid;
  2026. ses->linux_uid = ctx->linux_uid;
  2027. ses->sectype = ctx->sectype;
  2028. ses->sign = ctx->sign;
  2029. /* add server as first channel */
  2030. spin_lock(&ses->chan_lock);
  2031. ses->chans[0].server = server;
  2032. ses->chan_count = 1;
  2033. ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
  2034. ses->chans_need_reconnect = 1;
  2035. spin_unlock(&ses->chan_lock);
  2036. mutex_lock(&ses->session_mutex);
  2037. rc = cifs_negotiate_protocol(xid, ses, server);
  2038. if (!rc)
  2039. rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
  2040. mutex_unlock(&ses->session_mutex);
  2041. /* each channel uses a different signing key */
  2042. spin_lock(&ses->chan_lock);
  2043. memcpy(ses->chans[0].signkey, ses->smb3signingkey,
  2044. sizeof(ses->smb3signingkey));
  2045. spin_unlock(&ses->chan_lock);
  2046. if (rc)
  2047. goto get_ses_fail;
  2048. /*
2049. * success, put it on the list and add it as first channel.
2050. * Note: the session becomes active soon after this, so you'll
2051. * need to lock before changing anything in the session.
  2052. */
  2053. spin_lock(&cifs_tcp_ses_lock);
  2054. list_add(&ses->smb_ses_list, &server->smb_ses_list);
  2055. spin_unlock(&cifs_tcp_ses_lock);
  2056. cifs_setup_ipc(ses, ctx);
  2057. free_xid(xid);
  2058. return ses;
  2059. get_ses_fail:
  2060. sesInfoFree(ses);
  2061. free_xid(xid);
  2062. return ERR_PTR(rc);
  2063. }
  2064. /* this function must be called with tc_lock held */
  2065. static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
  2066. {
  2067. if (tcon->status == TID_EXITING)
  2068. return 0;
  2069. if (strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE))
  2070. return 0;
  2071. if (tcon->seal != ctx->seal)
  2072. return 0;
  2073. if (tcon->snapshot_time != ctx->snapshot_time)
  2074. return 0;
  2075. if (tcon->handle_timeout != ctx->handle_timeout)
  2076. return 0;
  2077. if (tcon->no_lease != ctx->no_lease)
  2078. return 0;
  2079. if (tcon->nodelete != ctx->nodelete)
  2080. return 0;
  2081. return 1;
  2082. }
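/*
 * Return an existing tcon on @ses matching @ctx, with tc_count bumped,
 * or NULL if none is found.
 */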
  2083. static struct cifs_tcon *
  2084. cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
  2085. {
  2086. struct cifs_tcon *tcon;
  2087. spin_lock(&cifs_tcp_ses_lock);
  2088. list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
  2089. spin_lock(&tcon->tc_lock);
  2090. if (!match_tcon(tcon, ctx)) {
  2091. spin_unlock(&tcon->tc_lock);
  2092. continue;
  2093. }
  2094. ++tcon->tc_count;
  2095. spin_unlock(&tcon->tc_lock);
  2096. spin_unlock(&cifs_tcp_ses_lock);
  2097. return tcon;
  2098. }
  2099. spin_unlock(&cifs_tcp_ses_lock);
  2100. return NULL;
  2101. }
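/*
 * Drop a mount reference to a tcon. On the last put the tree is
 * disconnected, witness registration and the fscache cookie are released,
 * and the session reference is dropped. IPC tcons are ignored here; they
 * are torn down together with their session.
 */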
  2102. void
  2103. cifs_put_tcon(struct cifs_tcon *tcon)
  2104. {
  2105. unsigned int xid;
  2106. struct cifs_ses *ses;
  2107. /*
2108. * IPC tcons share the lifetime of their session and are
2109. * destroyed in the session put function
  2110. */
  2111. if (tcon == NULL || tcon->ipc)
  2112. return;
  2113. ses = tcon->ses;
  2114. cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
  2115. spin_lock(&cifs_tcp_ses_lock);
  2116. spin_lock(&tcon->tc_lock);
  2117. if (--tcon->tc_count > 0) {
  2118. spin_unlock(&tcon->tc_lock);
  2119. spin_unlock(&cifs_tcp_ses_lock);
  2120. return;
  2121. }
  2122. /* tc_count can never go negative */
  2123. WARN_ON(tcon->tc_count < 0);
  2124. list_del_init(&tcon->tcon_list);
  2125. tcon->status = TID_EXITING;
  2126. spin_unlock(&tcon->tc_lock);
  2127. spin_unlock(&cifs_tcp_ses_lock);
  2128. /* cancel polling of interfaces */
  2129. cancel_delayed_work_sync(&tcon->query_interfaces);
  2130. if (tcon->use_witness) {
  2131. int rc;
  2132. rc = cifs_swn_unregister(tcon);
  2133. if (rc < 0) {
  2134. cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
  2135. __func__, rc);
  2136. }
  2137. }
  2138. xid = get_xid();
  2139. if (ses->server->ops->tree_disconnect)
  2140. ses->server->ops->tree_disconnect(xid, tcon);
  2141. _free_xid(xid);
  2142. cifs_fscache_release_super_cookie(tcon);
  2143. tconInfoFree(tcon);
  2144. cifs_put_smb_ses(ses);
  2145. }
  2146. /**
  2147. * cifs_get_tcon - get a tcon matching @ctx data from @ses
  2148. * @ses: smb session to issue the request on
2149. * @ctx: the superblock configuration context to use for building the new tcon
  2150. *
  2151. * - tcon refcount is the number of mount points using the tcon.
  2152. * - ses refcount is the number of tcon using the session.
  2153. *
  2154. * 1. This function assumes it is being called from cifs_mount() where
  2155. * we already got a session reference (ses refcount +1).
  2156. *
  2157. * 2. Since we're in the context of adding a mount point, the end
  2158. * result should be either:
  2159. *
  2160. * a) a new tcon already allocated with refcount=1 (1 mount point) and
  2161. * its session refcount incremented (1 new tcon). This +1 was
  2162. * already done in (1).
  2163. *
  2164. * b) an existing tcon with refcount+1 (add a mount point to it) and
  2165. * identical ses refcount (no new tcon). Because of (1) we need to
  2166. * decrement the ses refcount.
  2167. */
  2168. static struct cifs_tcon *
  2169. cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
  2170. {
  2171. int rc, xid;
  2172. struct cifs_tcon *tcon;
  2173. tcon = cifs_find_tcon(ses, ctx);
  2174. if (tcon) {
  2175. /*
2176. * tcon already has its refcount incremented, but we need to
2177. * drop the extra ses reference taken by the caller (case b)
  2178. */
  2179. cifs_dbg(FYI, "Found match on UNC path\n");
  2180. cifs_put_smb_ses(ses);
  2181. return tcon;
  2182. }
  2183. if (!ses->server->ops->tree_connect) {
  2184. rc = -ENOSYS;
  2185. goto out_fail;
  2186. }
  2187. tcon = tconInfoAlloc();
  2188. if (tcon == NULL) {
  2189. rc = -ENOMEM;
  2190. goto out_fail;
  2191. }
  2192. if (ctx->snapshot_time) {
  2193. if (ses->server->vals->protocol_id == 0) {
  2194. cifs_dbg(VFS,
  2195. "Use SMB2 or later for snapshot mount option\n");
  2196. rc = -EOPNOTSUPP;
  2197. goto out_fail;
  2198. } else
  2199. tcon->snapshot_time = ctx->snapshot_time;
  2200. }
  2201. if (ctx->handle_timeout) {
  2202. if (ses->server->vals->protocol_id == 0) {
  2203. cifs_dbg(VFS,
  2204. "Use SMB2.1 or later for handle timeout option\n");
  2205. rc = -EOPNOTSUPP;
  2206. goto out_fail;
  2207. } else
  2208. tcon->handle_timeout = ctx->handle_timeout;
  2209. }
  2210. tcon->ses = ses;
  2211. if (ctx->password) {
  2212. tcon->password = kstrdup(ctx->password, GFP_KERNEL);
  2213. if (!tcon->password) {
  2214. rc = -ENOMEM;
  2215. goto out_fail;
  2216. }
  2217. }
  2218. if (ctx->seal) {
  2219. if (ses->server->vals->protocol_id == 0) {
  2220. cifs_dbg(VFS,
  2221. "SMB3 or later required for encryption\n");
  2222. rc = -EOPNOTSUPP;
  2223. goto out_fail;
  2224. } else if (tcon->ses->server->capabilities &
  2225. SMB2_GLOBAL_CAP_ENCRYPTION)
  2226. tcon->seal = true;
  2227. else {
  2228. cifs_dbg(VFS, "Encryption is not supported on share\n");
  2229. rc = -EOPNOTSUPP;
  2230. goto out_fail;
  2231. }
  2232. }
  2233. if (ctx->linux_ext) {
  2234. if (ses->server->posix_ext_supported) {
  2235. tcon->posix_extensions = true;
  2236. pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
  2237. } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
  2238. (strcmp(ses->server->vals->version_string,
  2239. SMB3ANY_VERSION_STRING) == 0) ||
  2240. (strcmp(ses->server->vals->version_string,
  2241. SMBDEFAULT_VERSION_STRING) == 0)) {
  2242. cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
  2243. rc = -EOPNOTSUPP;
  2244. goto out_fail;
  2245. } else {
  2246. cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
  2247. "disabled but required for POSIX extensions\n");
  2248. rc = -EOPNOTSUPP;
  2249. goto out_fail;
  2250. }
  2251. }
  2252. xid = get_xid();
  2253. rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
  2254. ctx->local_nls);
  2255. free_xid(xid);
  2256. cifs_dbg(FYI, "Tcon rc = %d\n", rc);
  2257. if (rc)
  2258. goto out_fail;
  2259. tcon->use_persistent = false;
  2260. /* check if SMB2 or later, CIFS does not support persistent handles */
  2261. if (ctx->persistent) {
  2262. if (ses->server->vals->protocol_id == 0) {
  2263. cifs_dbg(VFS,
  2264. "SMB3 or later required for persistent handles\n");
  2265. rc = -EOPNOTSUPP;
  2266. goto out_fail;
  2267. } else if (ses->server->capabilities &
  2268. SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
  2269. tcon->use_persistent = true;
  2270. else /* persistent handles requested but not supported */ {
  2271. cifs_dbg(VFS,
  2272. "Persistent handles not supported on share\n");
  2273. rc = -EOPNOTSUPP;
  2274. goto out_fail;
  2275. }
  2276. } else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
  2277. && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
  2278. && (ctx->nopersistent == false)) {
  2279. cifs_dbg(FYI, "enabling persistent handles\n");
  2280. tcon->use_persistent = true;
  2281. } else if (ctx->resilient) {
  2282. if (ses->server->vals->protocol_id == 0) {
  2283. cifs_dbg(VFS,
  2284. "SMB2.1 or later required for resilient handles\n");
  2285. rc = -EOPNOTSUPP;
  2286. goto out_fail;
  2287. }
  2288. tcon->use_resilient = true;
  2289. }
  2290. tcon->use_witness = false;
  2291. if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
  2292. if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
  2293. if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
  2294. /*
  2295. * Set witness in use flag in first place
  2296. * to retry registration in the echo task
  2297. */
  2298. tcon->use_witness = true;
  2299. /* And try to register immediately */
  2300. rc = cifs_swn_register(tcon);
  2301. if (rc < 0) {
  2302. cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
  2303. goto out_fail;
  2304. }
  2305. } else {
  2306. /* TODO: try to extend for non-cluster uses (eg multichannel) */
  2307. cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
  2308. rc = -EOPNOTSUPP;
  2309. goto out_fail;
  2310. }
  2311. } else {
  2312. cifs_dbg(VFS, "SMB3 or later required for witness option\n");
  2313. rc = -EOPNOTSUPP;
  2314. goto out_fail;
  2315. }
  2316. }
  2317. /* If the user really knows what they are doing they can override */
  2318. if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
  2319. if (ctx->cache_ro)
  2320. cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
  2321. else if (ctx->cache_rw)
  2322. cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
  2323. }
  2324. if (ctx->no_lease) {
  2325. if (ses->server->vals->protocol_id == 0) {
  2326. cifs_dbg(VFS,
  2327. "SMB2 or later required for nolease option\n");
  2328. rc = -EOPNOTSUPP;
  2329. goto out_fail;
  2330. } else
  2331. tcon->no_lease = ctx->no_lease;
  2332. }
  2333. /*
2334. * We can have only one retry value for a connection to a share, so for
2335. * resources mounted more than once to the same server share, the last
2336. * value passed in for the retry flag is used.
  2337. */
  2338. tcon->retry = ctx->retry;
  2339. tcon->nocase = ctx->nocase;
  2340. tcon->broken_sparse_sup = ctx->no_sparse;
  2341. if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
  2342. tcon->nohandlecache = ctx->nohandlecache;
  2343. else
  2344. tcon->nohandlecache = true;
  2345. tcon->nodelete = ctx->nodelete;
  2346. tcon->local_lease = ctx->local_lease;
  2347. INIT_LIST_HEAD(&tcon->pending_opens);
  2348. tcon->status = TID_GOOD;
  2349. INIT_DELAYED_WORK(&tcon->query_interfaces,
  2350. smb2_query_server_interfaces);
  2351. if (ses->server->dialect >= SMB30_PROT_ID &&
  2352. (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
  2353. /* schedule query interfaces poll */
  2354. queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
  2355. (SMB_INTERFACE_POLL_INTERVAL * HZ));
  2356. }
  2357. spin_lock(&cifs_tcp_ses_lock);
  2358. list_add(&tcon->tcon_list, &ses->tcon_list);
  2359. spin_unlock(&cifs_tcp_ses_lock);
  2360. return tcon;
  2361. out_fail:
  2362. tconInfoFree(tcon);
  2363. return ERR_PTR(rc);
  2364. }
  2365. void
  2366. cifs_put_tlink(struct tcon_link *tlink)
  2367. {
  2368. if (!tlink || IS_ERR(tlink))
  2369. return;
  2370. if (!atomic_dec_and_test(&tlink->tl_count) ||
  2371. test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
  2372. tlink->tl_time = jiffies;
  2373. return;
  2374. }
  2375. if (!IS_ERR(tlink_tcon(tlink)))
  2376. cifs_put_tcon(tlink_tcon(tlink));
  2377. kfree(tlink);
  2378. return;
  2379. }
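/*
 * Returns 1 if the mount options of the existing superblock are compatible
 * with the new mount request (same flags, owner/mode settings, charset and
 * timeouts, and no smaller r/wsize), so the superblock can be shared.
 */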
  2380. static int
  2381. compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
  2382. {
  2383. struct cifs_sb_info *old = CIFS_SB(sb);
  2384. struct cifs_sb_info *new = mnt_data->cifs_sb;
  2385. unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
  2386. unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
  2387. if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
  2388. return 0;
  2389. if (old->mnt_cifs_serverino_autodisabled)
  2390. newflags &= ~CIFS_MOUNT_SERVER_INUM;
  2391. if (oldflags != newflags)
  2392. return 0;
  2393. /*
2394. * We want to share the sb only if we don't specify an r/wsize or the
2395. * specified r/wsize is greater than or equal to the existing one.
  2396. */
  2397. if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
  2398. return 0;
  2399. if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
  2400. return 0;
  2401. if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
  2402. !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
  2403. return 0;
  2404. if (old->ctx->file_mode != new->ctx->file_mode ||
  2405. old->ctx->dir_mode != new->ctx->dir_mode)
  2406. return 0;
  2407. if (strcmp(old->local_nls->charset, new->local_nls->charset))
  2408. return 0;
  2409. if (old->ctx->acregmax != new->ctx->acregmax)
  2410. return 0;
  2411. if (old->ctx->acdirmax != new->ctx->acdirmax)
  2412. return 0;
  2413. if (old->ctx->closetimeo != new->ctx->closetimeo)
  2414. return 0;
  2415. return 1;
  2416. }
  2417. static int
  2418. match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data)
  2419. {
  2420. struct cifs_sb_info *old = CIFS_SB(sb);
  2421. struct cifs_sb_info *new = mnt_data->cifs_sb;
  2422. bool old_set = (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
  2423. old->prepath;
  2424. bool new_set = (new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
  2425. new->prepath;
  2426. if (old_set && new_set && !strcmp(new->prepath, old->prepath))
  2427. return 1;
  2428. else if (!old_set && !new_set)
  2429. return 1;
  2430. return 0;
  2431. }
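/*
 * Check whether an existing cifs superblock can be reused for a new mount:
 * the server, session, tcon, prefix path and mount options must all match.
 */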
  2432. int
  2433. cifs_match_super(struct super_block *sb, void *data)
  2434. {
  2435. struct cifs_mnt_data *mnt_data = data;
  2436. struct smb3_fs_context *ctx;
  2437. struct cifs_sb_info *cifs_sb;
  2438. struct TCP_Server_Info *tcp_srv;
  2439. struct cifs_ses *ses;
  2440. struct cifs_tcon *tcon;
  2441. struct tcon_link *tlink;
  2442. int rc = 0;
  2443. spin_lock(&cifs_tcp_ses_lock);
  2444. cifs_sb = CIFS_SB(sb);
2445. /* We do not want to use a superblock that has been shut down */
  2446. if (CIFS_MOUNT_SHUTDOWN & cifs_sb->mnt_cifs_flags) {
  2447. spin_unlock(&cifs_tcp_ses_lock);
  2448. return 0;
  2449. }
  2450. tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
  2451. if (tlink == NULL) {
2452. /* cannot match superblock if tlink was ever null */
  2453. spin_unlock(&cifs_tcp_ses_lock);
  2454. return 0;
  2455. }
  2456. tcon = tlink_tcon(tlink);
  2457. ses = tcon->ses;
  2458. tcp_srv = ses->server;
  2459. ctx = mnt_data->ctx;
  2460. spin_lock(&tcp_srv->srv_lock);
  2461. spin_lock(&ses->ses_lock);
  2462. spin_lock(&ses->chan_lock);
  2463. spin_lock(&tcon->tc_lock);
  2464. if (!match_server(tcp_srv, ctx) ||
  2465. !match_session(ses, ctx) ||
  2466. !match_tcon(tcon, ctx) ||
  2467. !match_prepath(sb, mnt_data)) {
  2468. rc = 0;
  2469. goto out;
  2470. }
  2471. rc = compare_mount_options(sb, mnt_data);
  2472. out:
  2473. spin_unlock(&tcon->tc_lock);
  2474. spin_unlock(&ses->chan_lock);
  2475. spin_unlock(&ses->ses_lock);
  2476. spin_unlock(&tcp_srv->srv_lock);
  2477. spin_unlock(&cifs_tcp_ses_lock);
  2478. cifs_put_tlink(tlink);
  2479. return rc;
  2480. }
  2481. #ifdef CONFIG_DEBUG_LOCK_ALLOC
  2482. static struct lock_class_key cifs_key[2];
  2483. static struct lock_class_key cifs_slock_key[2];
  2484. static inline void
  2485. cifs_reclassify_socket4(struct socket *sock)
  2486. {
  2487. struct sock *sk = sock->sk;
  2488. BUG_ON(!sock_allow_reclassification(sk));
  2489. sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
  2490. &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
  2491. }
  2492. static inline void
  2493. cifs_reclassify_socket6(struct socket *sock)
  2494. {
  2495. struct sock *sk = sock->sk;
  2496. BUG_ON(!sock_allow_reclassification(sk));
  2497. sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
  2498. &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
  2499. }
  2500. #else
  2501. static inline void
  2502. cifs_reclassify_socket4(struct socket *sock)
  2503. {
  2504. }
  2505. static inline void
  2506. cifs_reclassify_socket6(struct socket *sock)
  2507. {
  2508. }
  2509. #endif
  2510. /* See RFC1001 section 14 on representation of Netbios names */
  2511. static void rfc1002mangle(char *target, char *source, unsigned int length)
  2512. {
  2513. unsigned int i, j;
  2514. for (i = 0, j = 0; i < (length); i++) {
  2515. /* mask a nibble at a time and encode */
  2516. target[j] = 'A' + (0x0F & (source[i] >> 4));
  2517. target[j+1] = 'A' + (0x0F & source[i]);
  2518. j += 2;
  2519. }
  2520. }
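/*
 * If a source address was specified for the mount, bind the socket to it
 * before connecting.
 */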
  2521. static int
  2522. bind_socket(struct TCP_Server_Info *server)
  2523. {
  2524. int rc = 0;
  2525. if (server->srcaddr.ss_family != AF_UNSPEC) {
  2526. /* Bind to the specified local IP address */
  2527. struct socket *socket = server->ssocket;
  2528. rc = kernel_bind(socket,
  2529. (struct sockaddr *) &server->srcaddr,
  2530. sizeof(server->srcaddr));
  2531. if (rc < 0) {
  2532. struct sockaddr_in *saddr4;
  2533. struct sockaddr_in6 *saddr6;
  2534. saddr4 = (struct sockaddr_in *)&server->srcaddr;
  2535. saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
  2536. if (saddr6->sin6_family == AF_INET6)
  2537. cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
  2538. &saddr6->sin6_addr, rc);
  2539. else
  2540. cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
  2541. &saddr4->sin_addr.s_addr, rc);
  2542. }
  2543. }
  2544. return rc;
  2545. }
  2546. static int
  2547. ip_rfc1001_connect(struct TCP_Server_Info *server)
  2548. {
  2549. int rc = 0;
  2550. /*
  2551. * some servers require RFC1001 sessinit before sending
  2552. * negprot - BB check reconnection in case where second
  2553. * sessinit is sent but no second negprot
  2554. */
  2555. struct rfc1002_session_packet req = {};
  2556. struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
  2557. unsigned int len;
  2558. req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
  2559. if (server->server_RFC1001_name[0] != 0)
  2560. rfc1002mangle(req.trailer.session_req.called_name,
  2561. server->server_RFC1001_name,
  2562. RFC1001_NAME_LEN_WITH_NULL);
  2563. else
  2564. rfc1002mangle(req.trailer.session_req.called_name,
  2565. DEFAULT_CIFS_CALLED_NAME,
  2566. RFC1001_NAME_LEN_WITH_NULL);
  2567. req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);
  2568. /* calling name ends in null (byte 16) from old smb convention */
  2569. if (server->workstation_RFC1001_name[0] != 0)
  2570. rfc1002mangle(req.trailer.session_req.calling_name,
  2571. server->workstation_RFC1001_name,
  2572. RFC1001_NAME_LEN_WITH_NULL);
  2573. else
  2574. rfc1002mangle(req.trailer.session_req.calling_name,
  2575. "LINUX_CIFS_CLNT",
  2576. RFC1001_NAME_LEN_WITH_NULL);
  2577. /*
2578. * As per rfc1002, @len must be the number of bytes that follow the
  2579. * length field of a rfc1002 session request payload.
  2580. */
  2581. len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
  2582. smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
  2583. rc = smb_send(server, smb_buf, len);
  2584. /*
2585. * The RFC1001 layer in at least one server requires a very short break before
2586. * negprot, presumably because it does not expect negprot to follow so fast.
2587. * This is a simple solution that works without complicating the code
2588. * and causes no significant slowdown on mount for everyone else.
  2589. */
  2590. usleep_range(1000, 2000);
  2591. return rc;
  2592. }
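/*
 * Rough shape of the frame built above (RFC 1002 session request): a 4-byte
 * header whose high byte is the packet type (RFC1002_SESSION_REQUEST) and
 * whose low bytes carry the trailer length, followed by the length-prefixed,
 * nibble-encoded called (server) and calling (client) NetBIOS names.  It is
 * only needed when connecting over the legacy NetBIOS port (139); see the
 * RFC1001_PORT check in generic_ip_connect().
 */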
  2593. static int
  2594. generic_ip_connect(struct TCP_Server_Info *server)
  2595. {
  2596. int rc = 0;
  2597. __be16 sport;
  2598. int slen, sfamily;
  2599. struct socket *socket = server->ssocket;
  2600. struct sockaddr *saddr;
  2601. saddr = (struct sockaddr *) &server->dstaddr;
  2602. if (server->dstaddr.ss_family == AF_INET6) {
  2603. struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;
  2604. sport = ipv6->sin6_port;
  2605. slen = sizeof(struct sockaddr_in6);
  2606. sfamily = AF_INET6;
  2607. cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
  2608. ntohs(sport));
  2609. } else {
  2610. struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;
  2611. sport = ipv4->sin_port;
  2612. slen = sizeof(struct sockaddr_in);
  2613. sfamily = AF_INET;
  2614. cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
  2615. ntohs(sport));
  2616. }
  2617. if (socket == NULL) {
  2618. rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
  2619. IPPROTO_TCP, &socket, 1);
  2620. if (rc < 0) {
  2621. cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
  2622. server->ssocket = NULL;
  2623. return rc;
  2624. }
  2625. /* BB other socket options to set KEEPALIVE, NODELAY? */
  2626. cifs_dbg(FYI, "Socket created\n");
  2627. server->ssocket = socket;
  2628. socket->sk->sk_allocation = GFP_NOFS;
  2629. if (sfamily == AF_INET6)
  2630. cifs_reclassify_socket6(socket);
  2631. else
  2632. cifs_reclassify_socket4(socket);
  2633. }
  2634. rc = bind_socket(server);
  2635. if (rc < 0)
  2636. return rc;
  2637. /*
* Eventually check for other socket options to change from the default.
* sock_setsockopt() is not used because it expects a user-space buffer.
  2641. */
  2642. socket->sk->sk_rcvtimeo = 7 * HZ;
  2643. socket->sk->sk_sndtimeo = 5 * HZ;
  2644. /* make the bufsizes depend on wsize/rsize and max requests */
  2645. if (server->noautotune) {
  2646. if (socket->sk->sk_sndbuf < (200 * 1024))
  2647. socket->sk->sk_sndbuf = 200 * 1024;
  2648. if (socket->sk->sk_rcvbuf < (140 * 1024))
  2649. socket->sk->sk_rcvbuf = 140 * 1024;
  2650. }
  2651. if (server->tcp_nodelay)
  2652. tcp_sock_set_nodelay(socket->sk);
  2653. cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
  2654. socket->sk->sk_sndbuf,
  2655. socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
  2656. rc = kernel_connect(socket, saddr, slen,
  2657. server->noblockcnt ? O_NONBLOCK : 0);
  2658. /*
  2659. * When mounting SMB root file systems, we do not want to block in
  2660. * connect. Otherwise bail out and then let cifs_reconnect() perform
  2661. * reconnect failover - if possible.
  2662. */
  2663. if (server->noblockcnt && rc == -EINPROGRESS)
  2664. rc = 0;
  2665. if (rc < 0) {
  2666. cifs_dbg(FYI, "Error %d connecting to server\n", rc);
  2667. trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
  2668. sock_release(socket);
  2669. server->ssocket = NULL;
  2670. return rc;
  2671. }
  2672. trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
  2673. if (sport == htons(RFC1001_PORT))
  2674. rc = ip_rfc1001_connect(server);
  2675. return rc;
  2676. }
  2677. static int
  2678. ip_connect(struct TCP_Server_Info *server)
  2679. {
  2680. __be16 *sport;
  2681. struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
  2682. struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
  2683. if (server->dstaddr.ss_family == AF_INET6)
  2684. sport = &addr6->sin6_port;
  2685. else
  2686. sport = &addr->sin_port;
  2687. if (*sport == 0) {
  2688. int rc;
/* try port 445 first */
  2690. *sport = htons(CIFS_PORT);
  2691. rc = generic_ip_connect(server);
  2692. if (rc >= 0)
  2693. return rc;
/* if that failed, try port 139 */
  2695. *sport = htons(RFC1001_PORT);
  2696. }
  2697. return generic_ip_connect(server);
  2698. }
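/*
 * In other words: when no port was specified, try the direct-hosted SMB port
 * 445 (CIFS_PORT) first and fall back to the NetBIOS session service port
 * 139 (RFC1001_PORT); generic_ip_connect() then sends the RFC 1001 session
 * request only for the port-139 case.
 */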
  2699. #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
  2700. void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
  2701. struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
  2702. {
  2703. /*
* If we are reconnecting, should we check whether any requested
* capabilities changed locally (e.g. via remount)? Even if we could
* detect that they have, we can not do much about it here.
* Perhaps we could add a backpointer from the tcon to an array of sbs,
* or, if we changed to make all mounts of the same share use the same
* sb as NFS does, we would only need one backpointer to the sb.
* What if we wanted to mount the server share twice, once with and
* once without posixacls or posix paths?
  2713. */
  2714. __u64 saved_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
  2715. if (ctx && ctx->no_linux_ext) {
  2716. tcon->fsUnixInfo.Capability = 0;
  2717. tcon->unix_ext = 0; /* Unix Extensions disabled */
  2718. cifs_dbg(FYI, "Linux protocol extensions disabled\n");
  2719. return;
  2720. } else if (ctx)
  2721. tcon->unix_ext = 1; /* Unix Extensions supported */
  2722. if (!tcon->unix_ext) {
  2723. cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
  2724. return;
  2725. }
  2726. if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
  2727. __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
  2728. cifs_dbg(FYI, "unix caps which server supports %lld\n", cap);
  2729. /*
  2730. * check for reconnect case in which we do not
  2731. * want to change the mount behavior if we can avoid it
  2732. */
  2733. if (ctx == NULL) {
  2734. /*
  2735. * turn off POSIX ACL and PATHNAMES if not set
  2736. * originally at mount time
  2737. */
  2738. if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
  2739. cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
  2740. if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
  2741. if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
  2742. cifs_dbg(VFS, "POSIXPATH support change\n");
  2743. cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
  2744. } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
  2745. cifs_dbg(VFS, "possible reconnect error\n");
  2746. cifs_dbg(VFS, "server disabled POSIX path support\n");
  2747. }
  2748. }
  2749. if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
  2750. cifs_dbg(VFS, "per-share encryption not supported yet\n");
  2751. cap &= CIFS_UNIX_CAP_MASK;
  2752. if (ctx && ctx->no_psx_acl)
  2753. cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
  2754. else if (CIFS_UNIX_POSIX_ACL_CAP & cap) {
  2755. cifs_dbg(FYI, "negotiated posix acl support\n");
  2756. if (cifs_sb)
  2757. cifs_sb->mnt_cifs_flags |=
  2758. CIFS_MOUNT_POSIXACL;
  2759. }
  2760. if (ctx && ctx->posix_paths == 0)
  2761. cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
  2762. else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
  2763. cifs_dbg(FYI, "negotiate posix pathnames\n");
  2764. if (cifs_sb)
  2765. cifs_sb->mnt_cifs_flags |=
  2766. CIFS_MOUNT_POSIX_PATHS;
  2767. }
  2768. cifs_dbg(FYI, "Negotiate caps 0x%x\n", (int)cap);
  2769. #ifdef CONFIG_CIFS_DEBUG2
  2770. if (cap & CIFS_UNIX_FCNTL_CAP)
  2771. cifs_dbg(FYI, "FCNTL cap\n");
  2772. if (cap & CIFS_UNIX_EXTATTR_CAP)
  2773. cifs_dbg(FYI, "EXTATTR cap\n");
  2774. if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
  2775. cifs_dbg(FYI, "POSIX path cap\n");
  2776. if (cap & CIFS_UNIX_XATTR_CAP)
  2777. cifs_dbg(FYI, "XATTR cap\n");
  2778. if (cap & CIFS_UNIX_POSIX_ACL_CAP)
  2779. cifs_dbg(FYI, "POSIX ACL cap\n");
  2780. if (cap & CIFS_UNIX_LARGE_READ_CAP)
  2781. cifs_dbg(FYI, "very large read cap\n");
  2782. if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
  2783. cifs_dbg(FYI, "very large write cap\n");
  2784. if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
  2785. cifs_dbg(FYI, "transport encryption cap\n");
  2786. if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
  2787. cifs_dbg(FYI, "mandatory transport encryption cap\n");
  2788. #endif /* CIFS_DEBUG2 */
  2789. if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
  2790. if (ctx == NULL)
  2791. cifs_dbg(FYI, "resetting capabilities failed\n");
  2792. else
  2793. cifs_dbg(VFS, "Negotiating Unix capabilities with the server failed. Consider mounting with the Unix Extensions disabled if problems are found by specifying the nounix mount option.\n");
  2794. }
  2795. }
  2796. }
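/*
 * Worked example of the reconnect (ctx == NULL) masking above, assuming a
 * hypothetical server that did not advertise POSIX pathnames at original
 * mount time but does now: saved_cap lacks CIFS_UNIX_POSIX_PATHNAMES_CAP, so
 * the bit is cleared from the freshly queried cap (with a "POSIXPATH support
 * change" warning) and the mount keeps behaving as it did originally rather
 * than silently switching path semantics mid-mount.
 */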
  2797. #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
  2798. int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
  2799. {
  2800. struct smb3_fs_context *ctx = cifs_sb->ctx;
  2801. INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
  2802. spin_lock_init(&cifs_sb->tlink_tree_lock);
  2803. cifs_sb->tlink_tree = RB_ROOT;
  2804. cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
  2805. ctx->file_mode, ctx->dir_mode);
/* this is needed for ASCII codepage to Unicode conversions */
  2807. if (ctx->iocharset == NULL) {
  2808. /* load_nls_default cannot return null */
  2809. cifs_sb->local_nls = load_nls_default();
  2810. } else {
  2811. cifs_sb->local_nls = load_nls(ctx->iocharset);
  2812. if (cifs_sb->local_nls == NULL) {
  2813. cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
  2814. ctx->iocharset);
  2815. return -ELIBACC;
  2816. }
  2817. }
  2818. ctx->local_nls = cifs_sb->local_nls;
  2819. smb3_update_mnt_flags(cifs_sb);
  2820. if (ctx->direct_io)
  2821. cifs_dbg(FYI, "mounting share using direct i/o\n");
  2822. if (ctx->cache_ro) {
  2823. cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
  2824. cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RO_CACHE;
  2825. } else if (ctx->cache_rw) {
  2826. cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
  2827. cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_RO_CACHE |
  2828. CIFS_MOUNT_RW_CACHE);
  2829. }
  2830. if ((ctx->cifs_acl) && (ctx->dynperm))
  2831. cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");
  2832. if (ctx->prepath) {
  2833. cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
  2834. if (cifs_sb->prepath == NULL)
  2835. return -ENOMEM;
  2836. cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
  2837. }
  2838. return 0;
  2839. }
/* Release all successfully established connections */
static inline void mount_put_conns(struct mount_ctx *mnt_ctx)
{
  2844. if (mnt_ctx->tcon)
  2845. cifs_put_tcon(mnt_ctx->tcon);
  2846. else if (mnt_ctx->ses)
  2847. cifs_put_smb_ses(mnt_ctx->ses);
  2848. else if (mnt_ctx->server)
  2849. cifs_put_tcp_session(mnt_ctx->server, 0);
  2850. mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
  2851. free_xid(mnt_ctx->xid);
  2852. }
  2853. /* Get connections for tcp, ses and tcon */
  2854. static int mount_get_conns(struct mount_ctx *mnt_ctx)
  2855. {
  2856. int rc = 0;
  2857. struct TCP_Server_Info *server = NULL;
  2858. struct cifs_ses *ses = NULL;
  2859. struct cifs_tcon *tcon = NULL;
  2860. struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
  2861. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  2862. unsigned int xid;
  2863. xid = get_xid();
  2864. /* get a reference to a tcp session */
  2865. server = cifs_get_tcp_session(ctx, NULL);
  2866. if (IS_ERR(server)) {
  2867. rc = PTR_ERR(server);
  2868. server = NULL;
  2869. goto out;
  2870. }
  2871. /* get a reference to a SMB session */
  2872. ses = cifs_get_smb_ses(server, ctx);
  2873. if (IS_ERR(ses)) {
  2874. rc = PTR_ERR(ses);
  2875. ses = NULL;
  2876. goto out;
  2877. }
  2878. if ((ctx->persistent == true) && (!(ses->server->capabilities &
  2879. SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
  2880. cifs_server_dbg(VFS, "persistent handles not supported by server\n");
  2881. rc = -EOPNOTSUPP;
  2882. goto out;
  2883. }
  2884. /* search for existing tcon to this server share */
  2885. tcon = cifs_get_tcon(ses, ctx);
  2886. if (IS_ERR(tcon)) {
  2887. rc = PTR_ERR(tcon);
  2888. tcon = NULL;
  2889. goto out;
  2890. }
  2891. /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
  2892. if (tcon->posix_extensions)
  2893. cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
  2894. #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
  2895. /* tell server which Unix caps we support */
  2896. if (cap_unix(tcon->ses)) {
  2897. /*
* reset_cifs_unix_caps() checks the mount options to see whether unix
* extensions were disabled for just this mount.
  2900. */
  2901. reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
  2902. spin_lock(&tcon->ses->server->srv_lock);
  2903. if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
  2904. (le64_to_cpu(tcon->fsUnixInfo.Capability) &
  2905. CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
  2906. spin_unlock(&tcon->ses->server->srv_lock);
  2907. rc = -EACCES;
  2908. goto out;
  2909. }
  2910. spin_unlock(&tcon->ses->server->srv_lock);
  2911. } else
  2912. #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
  2913. tcon->unix_ext = 0; /* server does not support them */
/* do not care if the following call succeeds - informational */
  2915. if (!tcon->pipe && server->ops->qfs_tcon) {
  2916. server->ops->qfs_tcon(xid, tcon, cifs_sb);
  2917. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
  2918. if (tcon->fsDevInfo.DeviceCharacteristics &
  2919. cpu_to_le32(FILE_READ_ONLY_DEVICE))
  2920. cifs_dbg(VFS, "mounted to read only share\n");
  2921. else if ((cifs_sb->mnt_cifs_flags &
  2922. CIFS_MOUNT_RW_CACHE) == 0)
  2923. cifs_dbg(VFS, "read only mount of RW share\n");
  2924. /* no need to log a RW mount of a typical RW share */
  2925. }
  2926. }
  2927. /*
  2928. * Clamp the rsize/wsize mount arguments if they are too big for the server
  2929. * and set the rsize/wsize to the negotiated values if not passed in by
  2930. * the user on mount
  2931. */
  2932. if ((cifs_sb->ctx->wsize == 0) ||
  2933. (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx)))
  2934. cifs_sb->ctx->wsize = server->ops->negotiate_wsize(tcon, ctx);
  2935. if ((cifs_sb->ctx->rsize == 0) ||
  2936. (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
  2937. cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
  2938. /*
  2939. * The cookie is initialized from volume info returned above.
  2940. * Inside cifs_fscache_get_super_cookie it checks
* that we do not get the super cookie twice.
  2942. */
  2943. if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
  2944. cifs_fscache_get_super_cookie(tcon);
  2945. out:
  2946. mnt_ctx->server = server;
  2947. mnt_ctx->ses = ses;
  2948. mnt_ctx->tcon = tcon;
  2949. mnt_ctx->xid = xid;
  2950. return rc;
  2951. }
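/*
 * The objects obtained above form a refcounted hierarchy, roughly:
 *
 *   TCP_Server_Info  - one per TCP connection to a server (usually shared)
 *     -> cifs_ses    - an authenticated SMB session on that connection
 *        -> cifs_tcon - a tree connection to a single share
 *
 * On failure, mount_put_conns() releases only the topmost object that was
 * successfully obtained; dropping a higher-level object is responsible for
 * releasing the references it holds on the layers below it.
 */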
  2952. static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
  2953. struct cifs_tcon *tcon)
  2954. {
  2955. struct tcon_link *tlink;
  2956. /* hang the tcon off of the superblock */
  2957. tlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
  2958. if (tlink == NULL)
  2959. return -ENOMEM;
  2960. tlink->tl_uid = ses->linux_uid;
  2961. tlink->tl_tcon = tcon;
  2962. tlink->tl_time = jiffies;
  2963. set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
  2964. set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
  2965. cifs_sb->master_tlink = tlink;
  2966. spin_lock(&cifs_sb->tlink_tree_lock);
  2967. tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
  2968. spin_unlock(&cifs_sb->tlink_tree_lock);
  2969. queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
  2970. TLINK_IDLE_EXPIRE);
  2971. return 0;
  2972. }
  2973. #ifdef CONFIG_CIFS_DFS_UPCALL
  2974. /* Get unique dfs connections */
  2975. static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
  2976. {
  2977. int rc;
  2978. mnt_ctx->fs_ctx->nosharesock = true;
  2979. rc = mount_get_conns(mnt_ctx);
  2980. if (mnt_ctx->server) {
  2981. cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
  2982. spin_lock(&mnt_ctx->server->srv_lock);
  2983. mnt_ctx->server->is_dfs_conn = true;
  2984. spin_unlock(&mnt_ctx->server->srv_lock);
  2985. }
  2986. return rc;
  2987. }
  2988. /*
* Like cifs_build_path_to_root(), this returns the full path to the root,
* but it is used when we do not yet have an existing connection (tcon)
  2991. */
  2992. static char *
  2993. build_unc_path_to_root(const struct smb3_fs_context *ctx,
  2994. const struct cifs_sb_info *cifs_sb, bool useppath)
  2995. {
  2996. char *full_path, *pos;
  2997. unsigned int pplen = useppath && ctx->prepath ?
  2998. strlen(ctx->prepath) + 1 : 0;
  2999. unsigned int unc_len = strnlen(ctx->UNC, MAX_TREE_SIZE + 1);
  3000. if (unc_len > MAX_TREE_SIZE)
  3001. return ERR_PTR(-EINVAL);
  3002. full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
  3003. if (full_path == NULL)
  3004. return ERR_PTR(-ENOMEM);
  3005. memcpy(full_path, ctx->UNC, unc_len);
  3006. pos = full_path + unc_len;
  3007. if (pplen) {
  3008. *pos = CIFS_DIR_SEP(cifs_sb);
  3009. memcpy(pos + 1, ctx->prepath, pplen);
  3010. pos += pplen;
  3011. }
  3012. *pos = '\0'; /* add trailing null */
  3013. convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
  3014. cifs_dbg(FYI, "%s: full_path=%s\n", __func__, full_path);
  3015. return full_path;
  3016. }
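/*
 * Example (assuming a hypothetical mount of //srv/data with prefixpath
 * team/docs and '\' as the directory separator): ctx->UNC is "\\srv\data",
 * so this returns "\\srv\data\team\docs"; when posix paths are in effect,
 * CIFS_DIR_SEP() is '/' and the delimiters are converted accordingly.
 */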
  3017. /*
  3018. * expand_dfs_referral - Update cifs_sb from dfs referral path
  3019. *
* On success, cifs_sb->ctx->mount_options is (re-)allocated to a string
* containing updated options for the submount; on failure it is left
* untouched.
  3022. */
  3023. static int expand_dfs_referral(struct mount_ctx *mnt_ctx, const char *full_path,
  3024. struct dfs_info3_param *referral)
  3025. {
  3026. int rc;
  3027. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  3028. struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
  3029. char *fake_devname = NULL, *mdata = NULL;
  3030. mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, referral,
  3031. &fake_devname);
  3032. if (IS_ERR(mdata)) {
  3033. rc = PTR_ERR(mdata);
  3034. mdata = NULL;
  3035. } else {
  3036. /*
  3037. * We can not clear out the whole structure since we no longer have an explicit
  3038. * function to parse a mount-string. Instead we need to clear out the individual
  3039. * fields that are no longer valid.
  3040. */
  3041. kfree(ctx->prepath);
  3042. ctx->prepath = NULL;
  3043. rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
  3044. }
  3045. kfree(fake_devname);
  3046. kfree(cifs_sb->ctx->mount_options);
  3047. cifs_sb->ctx->mount_options = mdata;
  3048. return rc;
  3049. }
  3050. #endif
/* TODO: all callers to this are broken. We are not parsing mount_options here;
* should we pass a clone of the original context instead?
  3053. */
  3054. int
  3055. cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
  3056. {
  3057. int rc;
  3058. if (devname) {
  3059. cifs_dbg(FYI, "%s: devname=%s\n", __func__, devname);
  3060. rc = smb3_parse_devname(devname, ctx);
  3061. if (rc) {
  3062. cifs_dbg(VFS, "%s: failed to parse %s: %d\n", __func__, devname, rc);
  3063. return rc;
  3064. }
  3065. }
  3066. if (mntopts) {
  3067. char *ip;
  3068. rc = smb3_parse_opt(mntopts, "ip", &ip);
  3069. if (rc) {
  3070. cifs_dbg(VFS, "%s: failed to parse ip options: %d\n", __func__, rc);
  3071. return rc;
  3072. }
  3073. rc = cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip, strlen(ip));
  3074. kfree(ip);
  3075. if (!rc) {
  3076. cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
  3077. return -EINVAL;
  3078. }
  3079. }
  3080. if (ctx->nullauth) {
  3081. cifs_dbg(FYI, "Anonymous login\n");
  3082. kfree(ctx->username);
  3083. ctx->username = NULL;
  3084. } else if (ctx->username) {
  3085. /* BB fixme parse for domain name here */
  3086. cifs_dbg(FYI, "Username: %s\n", ctx->username);
  3087. } else {
  3088. cifs_dbg(VFS, "No username specified\n");
/* In the userspace mount helper we can get the user name from
alternate locations such as env variables and files on disk */
  3091. return -EINVAL;
  3092. }
  3093. return 0;
  3094. }
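/*
 * For context: smb3_parse_devname() is expected to split a source such as
 * //host/share (optionally with a trailing prefix path) into ctx->UNC and
 * ctx->prepath, while an explicit "ip=" mount option, parsed just above,
 * sets ctx->dstaddr directly instead of resolving the hostname later.
 */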
  3095. static int
  3096. cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
  3097. unsigned int xid,
  3098. struct cifs_tcon *tcon,
  3099. struct cifs_sb_info *cifs_sb,
  3100. char *full_path,
  3101. int added_treename)
  3102. {
  3103. int rc;
  3104. char *s;
  3105. char sep, tmp;
  3106. int skip = added_treename ? 1 : 0;
  3107. sep = CIFS_DIR_SEP(cifs_sb);
  3108. s = full_path;
  3109. rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
  3110. while (rc == 0) {
  3111. /* skip separators */
  3112. while (*s == sep)
  3113. s++;
  3114. if (!*s)
  3115. break;
  3116. /* next separator */
  3117. while (*s && *s != sep)
  3118. s++;
  3119. /*
  3120. * if the treename is added, we then have to skip the first
  3121. * part within the separators
  3122. */
  3123. if (skip) {
  3124. skip = 0;
  3125. continue;
  3126. }
  3127. /*
  3128. * temporarily null-terminate the path at the end of
  3129. * the current component
  3130. */
  3131. tmp = *s;
  3132. *s = 0;
  3133. rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
  3134. full_path);
  3135. *s = tmp;
  3136. }
  3137. return rc;
  3138. }
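/*
 * Example walk (hypothetical path): for full_path "\dir1\dir2\leaf" with
 * added_treename == 0, the loop above probes "", "\dir1", "\dir1\dir2" and
 * finally "\dir1\dir2\leaf", temporarily NUL-terminating full_path at each
 * step; with added_treename == 1 the first component (the prepended tree
 * name) is skipped.
 */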
  3139. /*
  3140. * Check if path is remote (i.e. a DFS share).
  3141. *
  3142. * Return -EREMOTE if it is, otherwise 0 or -errno.
  3143. */
  3144. static int is_path_remote(struct mount_ctx *mnt_ctx)
  3145. {
  3146. int rc;
  3147. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  3148. struct TCP_Server_Info *server = mnt_ctx->server;
  3149. unsigned int xid = mnt_ctx->xid;
  3150. struct cifs_tcon *tcon = mnt_ctx->tcon;
  3151. struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
  3152. char *full_path;
  3153. if (!server->ops->is_path_accessible)
  3154. return -EOPNOTSUPP;
  3155. /*
  3156. * cifs_build_path_to_root works only when we have a valid tcon
  3157. */
  3158. full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
  3159. tcon->Flags & SMB_SHARE_IS_IN_DFS);
  3160. if (full_path == NULL)
  3161. return -ENOMEM;
  3162. cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
  3163. rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
  3164. full_path);
  3165. if (rc != 0 && rc != -EREMOTE)
  3166. goto out;
  3167. if (rc != -EREMOTE) {
  3168. rc = cifs_are_all_path_components_accessible(server, xid, tcon,
  3169. cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
  3170. if (rc != 0) {
  3171. cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
  3172. cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
  3173. rc = 0;
  3174. }
  3175. }
  3176. out:
  3177. kfree(full_path);
  3178. return rc;
  3179. }
  3180. #ifdef CONFIG_CIFS_DFS_UPCALL
  3181. static void set_root_ses(struct mount_ctx *mnt_ctx)
  3182. {
  3183. if (mnt_ctx->ses) {
  3184. spin_lock(&cifs_tcp_ses_lock);
  3185. mnt_ctx->ses->ses_count++;
  3186. spin_unlock(&cifs_tcp_ses_lock);
  3187. dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
  3188. }
  3189. mnt_ctx->root_ses = mnt_ctx->ses;
  3190. }
  3191. static int is_dfs_mount(struct mount_ctx *mnt_ctx, bool *isdfs, struct dfs_cache_tgt_list *root_tl)
  3192. {
  3193. int rc;
  3194. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  3195. struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
  3196. *isdfs = true;
  3197. rc = mount_get_conns(mnt_ctx);
  3198. /*
  3199. * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
* try to get a DFS referral (even cached) to determine whether it is a DFS mount.
  3201. *
  3202. * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
  3203. * to respond with PATH_NOT_COVERED to requests that include the prefix.
  3204. */
  3205. if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
  3206. dfs_cache_find(mnt_ctx->xid, mnt_ctx->ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
  3207. ctx->UNC + 1, NULL, root_tl)) {
  3208. if (rc)
  3209. return rc;
  3210. /* Check if it is fully accessible and then mount it */
  3211. rc = is_path_remote(mnt_ctx);
  3212. if (!rc)
  3213. *isdfs = false;
  3214. else if (rc != -EREMOTE)
  3215. return rc;
  3216. }
  3217. return 0;
  3218. }
  3219. static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
  3220. const char *ref_path, struct dfs_cache_tgt_iterator *tit)
  3221. {
  3222. int rc;
  3223. struct dfs_info3_param ref = {};
  3224. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  3225. char *oldmnt = cifs_sb->ctx->mount_options;
  3226. cifs_dbg(FYI, "%s: full_path=%s ref_path=%s target=%s\n", __func__, full_path, ref_path,
  3227. dfs_cache_get_tgt_name(tit));
  3228. rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
  3229. if (rc)
  3230. goto out;
  3231. rc = expand_dfs_referral(mnt_ctx, full_path, &ref);
  3232. if (rc)
  3233. goto out;
  3234. /* Connect to new target only if we were redirected (e.g. mount options changed) */
  3235. if (oldmnt != cifs_sb->ctx->mount_options) {
  3236. mount_put_conns(mnt_ctx);
  3237. rc = mount_get_dfs_conns(mnt_ctx);
  3238. }
  3239. if (!rc) {
  3240. if (cifs_is_referral_server(mnt_ctx->tcon, &ref))
  3241. set_root_ses(mnt_ctx);
  3242. rc = dfs_cache_update_tgthint(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
  3243. cifs_remap(cifs_sb), ref_path, tit);
  3244. }
  3245. out:
  3246. free_dfs_info_param(&ref);
  3247. return rc;
  3248. }
  3249. static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list *root_tl)
  3250. {
  3251. int rc;
  3252. char *full_path;
  3253. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  3254. struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
  3255. struct dfs_cache_tgt_iterator *tit;
/* Put initial connections as they might be shared with other mounts. We need unique dfs
* connections per mount to fail over properly, so mount_get_dfs_conns() must be used from
* now on.
  3259. */
  3260. mount_put_conns(mnt_ctx);
  3261. mount_get_dfs_conns(mnt_ctx);
  3262. set_root_ses(mnt_ctx);
  3263. full_path = build_unc_path_to_root(ctx, cifs_sb, true);
  3264. if (IS_ERR(full_path))
  3265. return PTR_ERR(full_path);
  3266. mnt_ctx->origin_fullpath = dfs_cache_canonical_path(ctx->UNC, cifs_sb->local_nls,
  3267. cifs_remap(cifs_sb));
  3268. if (IS_ERR(mnt_ctx->origin_fullpath)) {
  3269. rc = PTR_ERR(mnt_ctx->origin_fullpath);
  3270. mnt_ctx->origin_fullpath = NULL;
  3271. goto out;
  3272. }
  3273. /* Try all dfs root targets */
  3274. for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(root_tl);
  3275. tit; tit = dfs_cache_get_next_tgt(root_tl, tit)) {
  3276. rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->origin_fullpath + 1, tit);
  3277. if (!rc) {
  3278. mnt_ctx->leaf_fullpath = kstrdup(mnt_ctx->origin_fullpath, GFP_KERNEL);
  3279. if (!mnt_ctx->leaf_fullpath)
  3280. rc = -ENOMEM;
  3281. break;
  3282. }
  3283. }
  3284. out:
  3285. kfree(full_path);
  3286. return rc;
  3287. }
  3288. static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
  3289. {
  3290. int rc;
  3291. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  3292. struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
  3293. char *full_path;
  3294. struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
  3295. struct dfs_cache_tgt_iterator *tit;
  3296. full_path = build_unc_path_to_root(ctx, cifs_sb, true);
  3297. if (IS_ERR(full_path))
  3298. return PTR_ERR(full_path);
  3299. kfree(mnt_ctx->leaf_fullpath);
  3300. mnt_ctx->leaf_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
  3301. cifs_remap(cifs_sb));
  3302. if (IS_ERR(mnt_ctx->leaf_fullpath)) {
  3303. rc = PTR_ERR(mnt_ctx->leaf_fullpath);
  3304. mnt_ctx->leaf_fullpath = NULL;
  3305. goto out;
  3306. }
  3307. /* Get referral from dfs link */
  3308. rc = dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
  3309. cifs_remap(cifs_sb), mnt_ctx->leaf_fullpath + 1, NULL, &tl);
  3310. if (rc)
  3311. goto out;
  3312. /* Try all dfs link targets. If an I/O fails from currently connected DFS target with an
  3313. * error other than STATUS_PATH_NOT_COVERED (-EREMOTE), then retry it from other targets as
  3314. * specified in MS-DFSC "3.1.5.2 I/O Operation to Target Fails with an Error Other Than
  3315. * STATUS_PATH_NOT_COVERED."
  3316. */
  3317. for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
  3318. tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
  3319. rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
  3320. if (!rc) {
  3321. rc = is_path_remote(mnt_ctx);
  3322. if (!rc || rc == -EREMOTE)
  3323. break;
  3324. }
  3325. }
  3326. out:
  3327. kfree(full_path);
  3328. dfs_cache_free_tgts(&tl);
  3329. return rc;
  3330. }
  3331. static int follow_dfs_link(struct mount_ctx *mnt_ctx)
  3332. {
  3333. int rc;
  3334. struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
  3335. struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
  3336. char *full_path;
  3337. int num_links = 0;
  3338. full_path = build_unc_path_to_root(ctx, cifs_sb, true);
  3339. if (IS_ERR(full_path))
  3340. return PTR_ERR(full_path);
  3341. kfree(mnt_ctx->origin_fullpath);
  3342. mnt_ctx->origin_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
  3343. cifs_remap(cifs_sb));
  3344. kfree(full_path);
  3345. if (IS_ERR(mnt_ctx->origin_fullpath)) {
  3346. rc = PTR_ERR(mnt_ctx->origin_fullpath);
  3347. mnt_ctx->origin_fullpath = NULL;
  3348. return rc;
  3349. }
  3350. do {
  3351. rc = __follow_dfs_link(mnt_ctx);
  3352. if (!rc || rc != -EREMOTE)
  3353. break;
  3354. } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
  3355. return rc;
  3356. }
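/*
 * Note on the loop condition above: the comma operator resets rc to -ELOOP
 * before each retest, so if the MAX_NESTED_LINKS cap is reached the caller
 * sees -ELOOP, while any other outcome (success, or an error other than
 * -EREMOTE) breaks out of the loop first and keeps the real return code.
 * tree_connect_dfs_target() below uses the same idiom.
 */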
  3357. /* Set up DFS referral paths for failover */
  3358. static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
  3359. {
  3360. struct TCP_Server_Info *server = mnt_ctx->server;
  3361. mutex_lock(&server->refpath_lock);
  3362. server->origin_fullpath = mnt_ctx->origin_fullpath;
  3363. server->leaf_fullpath = mnt_ctx->leaf_fullpath;
  3364. server->current_fullpath = mnt_ctx->leaf_fullpath;
  3365. mutex_unlock(&server->refpath_lock);
  3366. mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
  3367. }
  3368. int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
  3369. {
  3370. int rc;
  3371. struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
  3372. struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
  3373. bool isdfs;
  3374. rc = is_dfs_mount(&mnt_ctx, &isdfs, &tl);
  3375. if (rc)
  3376. goto error;
  3377. if (!isdfs)
  3378. goto out;
  3379. /* proceed as DFS mount */
  3380. uuid_gen(&mnt_ctx.mount_id);
  3381. rc = connect_dfs_root(&mnt_ctx, &tl);
  3382. dfs_cache_free_tgts(&tl);
  3383. if (rc)
  3384. goto error;
  3385. rc = is_path_remote(&mnt_ctx);
  3386. if (rc)
  3387. rc = follow_dfs_link(&mnt_ctx);
  3388. if (rc)
  3389. goto error;
  3390. setup_server_referral_paths(&mnt_ctx);
  3391. /*
* After reconnecting to a different server, unique ids won't match anymore, so we disable
* serverino. This prevents dentry revalidation from thinking the dentries are stale (ESTALE).
  3394. */
  3395. cifs_autodisable_serverino(cifs_sb);
  3396. /*
  3397. * Force the use of prefix path to support failover on DFS paths that resolve to targets
  3398. * that have different prefix paths.
  3399. */
  3400. cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
  3401. kfree(cifs_sb->prepath);
  3402. cifs_sb->prepath = ctx->prepath;
  3403. ctx->prepath = NULL;
  3404. uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
  3405. out:
  3406. cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
  3407. rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
  3408. if (rc)
  3409. goto error;
  3410. free_xid(mnt_ctx.xid);
  3411. return rc;
  3412. error:
  3413. dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
  3414. kfree(mnt_ctx.origin_fullpath);
  3415. kfree(mnt_ctx.leaf_fullpath);
  3416. mount_put_conns(&mnt_ctx);
  3417. return rc;
  3418. }
  3419. #else
  3420. int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
  3421. {
  3422. int rc = 0;
  3423. struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
  3424. rc = mount_get_conns(&mnt_ctx);
  3425. if (rc)
  3426. goto error;
  3427. if (mnt_ctx.tcon) {
  3428. rc = is_path_remote(&mnt_ctx);
  3429. if (rc == -EREMOTE)
  3430. rc = -EOPNOTSUPP;
  3431. if (rc)
  3432. goto error;
  3433. }
  3434. rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
  3435. if (rc)
  3436. goto error;
  3437. free_xid(mnt_ctx.xid);
  3438. return rc;
  3439. error:
  3440. mount_put_conns(&mnt_ctx);
  3441. return rc;
  3442. }
  3443. #endif
  3444. /*
  3445. * Issue a TREE_CONNECT request.
  3446. */
  3447. int
  3448. CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
  3449. const char *tree, struct cifs_tcon *tcon,
  3450. const struct nls_table *nls_codepage)
  3451. {
  3452. struct smb_hdr *smb_buffer;
  3453. struct smb_hdr *smb_buffer_response;
  3454. TCONX_REQ *pSMB;
  3455. TCONX_RSP *pSMBr;
  3456. unsigned char *bcc_ptr;
  3457. int rc = 0;
  3458. int length;
  3459. __u16 bytes_left, count;
  3460. if (ses == NULL)
  3461. return -EIO;
  3462. smb_buffer = cifs_buf_get();
  3463. if (smb_buffer == NULL)
  3464. return -ENOMEM;
  3465. smb_buffer_response = smb_buffer;
  3466. header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
  3467. NULL /*no tid */ , 4 /*wct */ );
  3468. smb_buffer->Mid = get_next_mid(ses->server);
  3469. smb_buffer->Uid = ses->Suid;
  3470. pSMB = (TCONX_REQ *) smb_buffer;
  3471. pSMBr = (TCONX_RSP *) smb_buffer_response;
  3472. pSMB->AndXCommand = 0xFF;
  3473. pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
  3474. bcc_ptr = &pSMB->Password[0];
  3475. pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
  3476. *bcc_ptr = 0; /* password is null byte */
  3477. bcc_ptr++; /* skip password */
  3478. /* already aligned so no need to do it below */
  3479. if (ses->server->sign)
  3480. smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
  3481. if (ses->capabilities & CAP_STATUS32) {
  3482. smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
  3483. }
  3484. if (ses->capabilities & CAP_DFS) {
  3485. smb_buffer->Flags2 |= SMBFLG2_DFS;
  3486. }
  3487. if (ses->capabilities & CAP_UNICODE) {
  3488. smb_buffer->Flags2 |= SMBFLG2_UNICODE;
  3489. length =
  3490. cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
  3491. 6 /* max utf8 char length in bytes */ *
  3492. (/* server len*/ + 256 /* share len */), nls_codepage);
  3493. bcc_ptr += 2 * length; /* convert num 16 bit words to bytes */
  3494. bcc_ptr += 2; /* skip trailing null */
  3495. } else { /* ASCII */
  3496. strcpy(bcc_ptr, tree);
  3497. bcc_ptr += strlen(tree) + 1;
  3498. }
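/*
 * "?????" is the wildcard service string in TREE_CONNECT_ANDX requests:
 * the client accepts any share/device type rather than asking specifically
 * for e.g. "A:" (disk) or "IPC" (named pipe).
 */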
  3499. strcpy(bcc_ptr, "?????");
  3500. bcc_ptr += strlen("?????");
  3501. bcc_ptr += 1;
  3502. count = bcc_ptr - &pSMB->Password[0];
  3503. be32_add_cpu(&pSMB->hdr.smb_buf_length, count);
  3504. pSMB->ByteCount = cpu_to_le16(count);
  3505. rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
  3506. 0);
  3507. /* above now done in SendReceive */
  3508. if (rc == 0) {
  3509. bool is_unicode;
  3510. tcon->tid = smb_buffer_response->Tid;
  3511. bcc_ptr = pByteArea(smb_buffer_response);
  3512. bytes_left = get_bcc(smb_buffer_response);
  3513. length = strnlen(bcc_ptr, bytes_left - 2);
  3514. if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
  3515. is_unicode = true;
  3516. else
  3517. is_unicode = false;
  3518. /* skip service field (NB: this field is always ASCII) */
  3519. if (length == 3) {
  3520. if ((bcc_ptr[0] == 'I') && (bcc_ptr[1] == 'P') &&
  3521. (bcc_ptr[2] == 'C')) {
  3522. cifs_dbg(FYI, "IPC connection\n");
  3523. tcon->ipc = true;
  3524. tcon->pipe = true;
  3525. }
  3526. } else if (length == 2) {
  3527. if ((bcc_ptr[0] == 'A') && (bcc_ptr[1] == ':')) {
  3528. /* the most common case */
  3529. cifs_dbg(FYI, "disk share connection\n");
  3530. }
  3531. }
  3532. bcc_ptr += length + 1;
  3533. bytes_left -= (length + 1);
  3534. strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
  3535. /* mostly informational -- no need to fail on error here */
  3536. kfree(tcon->nativeFileSystem);
  3537. tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
  3538. bytes_left, is_unicode,
  3539. nls_codepage);
  3540. cifs_dbg(FYI, "nativeFileSystem=%s\n", tcon->nativeFileSystem);
  3541. if ((smb_buffer_response->WordCount == 3) ||
  3542. (smb_buffer_response->WordCount == 7))
  3543. /* field is in same location */
  3544. tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
  3545. else
  3546. tcon->Flags = 0;
  3547. cifs_dbg(FYI, "Tcon flags: 0x%x\n", tcon->Flags);
  3548. }
  3549. cifs_buf_release(smb_buffer);
  3550. return rc;
  3551. }
  3552. static void delayed_free(struct rcu_head *p)
  3553. {
  3554. struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
  3555. unload_nls(cifs_sb->local_nls);
  3556. smb3_cleanup_fs_context(cifs_sb->ctx);
  3557. kfree(cifs_sb);
  3558. }
  3559. void
  3560. cifs_umount(struct cifs_sb_info *cifs_sb)
  3561. {
  3562. struct rb_root *root = &cifs_sb->tlink_tree;
  3563. struct rb_node *node;
  3564. struct tcon_link *tlink;
  3565. cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
  3566. spin_lock(&cifs_sb->tlink_tree_lock);
  3567. while ((node = rb_first(root))) {
  3568. tlink = rb_entry(node, struct tcon_link, tl_rbnode);
  3569. cifs_get_tlink(tlink);
  3570. clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
  3571. rb_erase(node, root);
  3572. spin_unlock(&cifs_sb->tlink_tree_lock);
  3573. cifs_put_tlink(tlink);
  3574. spin_lock(&cifs_sb->tlink_tree_lock);
  3575. }
  3576. spin_unlock(&cifs_sb->tlink_tree_lock);
  3577. kfree(cifs_sb->prepath);
  3578. #ifdef CONFIG_CIFS_DFS_UPCALL
  3579. dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
  3580. #endif
  3581. call_rcu(&cifs_sb->rcu, delayed_free);
  3582. }
  3583. int
  3584. cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
  3585. struct TCP_Server_Info *server)
  3586. {
  3587. int rc = 0;
  3588. if (!server->ops->need_neg || !server->ops->negotiate)
  3589. return -ENOSYS;
  3590. /* only send once per connect */
  3591. spin_lock(&server->srv_lock);
  3592. if (server->tcpStatus != CifsGood &&
  3593. server->tcpStatus != CifsNew &&
  3594. server->tcpStatus != CifsNeedNegotiate) {
  3595. spin_unlock(&server->srv_lock);
  3596. return -EHOSTDOWN;
  3597. }
  3598. if (!server->ops->need_neg(server) &&
  3599. server->tcpStatus == CifsGood) {
  3600. spin_unlock(&server->srv_lock);
  3601. return 0;
  3602. }
  3603. server->tcpStatus = CifsInNegotiate;
  3604. spin_unlock(&server->srv_lock);
  3605. rc = server->ops->negotiate(xid, ses, server);
  3606. if (rc == 0) {
  3607. spin_lock(&server->srv_lock);
  3608. if (server->tcpStatus == CifsInNegotiate)
  3609. server->tcpStatus = CifsGood;
  3610. else
  3611. rc = -EHOSTDOWN;
  3612. spin_unlock(&server->srv_lock);
  3613. } else {
  3614. spin_lock(&server->srv_lock);
  3615. if (server->tcpStatus == CifsInNegotiate)
  3616. server->tcpStatus = CifsNeedNegotiate;
  3617. spin_unlock(&server->srv_lock);
  3618. }
  3619. return rc;
  3620. }
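/*
 * State transitions driven above: a connection in CifsNew, CifsNeedNegotiate
 * or CifsGood may enter CifsInNegotiate; a successful negotiate moves it
 * (back) to CifsGood (unless the status changed underneath, which is
 * reported as -EHOSTDOWN), while a failed negotiate returns it to
 * CifsNeedNegotiate so a later reconnect can retry.
 */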
  3621. int
  3622. cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
  3623. struct TCP_Server_Info *server,
  3624. struct nls_table *nls_info)
  3625. {
  3626. int rc = -ENOSYS;
  3627. struct TCP_Server_Info *pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
  3628. struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
  3629. struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
  3630. bool is_binding = false;
  3631. spin_lock(&ses->ses_lock);
  3632. cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
  3633. __func__, ses->chans_need_reconnect);
  3634. if (ses->ses_status != SES_GOOD &&
  3635. ses->ses_status != SES_NEW &&
  3636. ses->ses_status != SES_NEED_RECON) {
  3637. spin_unlock(&ses->ses_lock);
  3638. return -EHOSTDOWN;
  3639. }
  3640. /* only send once per connect */
  3641. spin_lock(&ses->chan_lock);
  3642. if (CIFS_ALL_CHANS_GOOD(ses)) {
  3643. if (ses->ses_status == SES_NEED_RECON)
  3644. ses->ses_status = SES_GOOD;
  3645. spin_unlock(&ses->chan_lock);
  3646. spin_unlock(&ses->ses_lock);
  3647. return 0;
  3648. }
  3649. cifs_chan_set_in_reconnect(ses, server);
  3650. is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
  3651. spin_unlock(&ses->chan_lock);
  3652. if (!is_binding) {
  3653. ses->ses_status = SES_IN_SETUP;
  3654. /* force iface_list refresh */
  3655. ses->iface_last_update = 0;
  3656. }
  3657. spin_unlock(&ses->ses_lock);
  3658. /* update ses ip_addr only for primary chan */
  3659. if (server == pserver) {
  3660. if (server->dstaddr.ss_family == AF_INET6)
  3661. scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
  3662. else
  3663. scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
  3664. }
  3665. if (!is_binding) {
  3666. ses->capabilities = server->capabilities;
  3667. if (!linuxExtEnabled)
  3668. ses->capabilities &= (~server->vals->cap_unix);
  3669. if (ses->auth_key.response) {
  3670. cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
  3671. ses->auth_key.response);
  3672. kfree_sensitive(ses->auth_key.response);
  3673. ses->auth_key.response = NULL;
  3674. ses->auth_key.len = 0;
  3675. }
  3676. }
  3677. cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
  3678. server->sec_mode, server->capabilities, server->timeAdj);
  3679. if (server->ops->sess_setup)
  3680. rc = server->ops->sess_setup(xid, ses, server, nls_info);
  3681. if (rc) {
  3682. cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
  3683. spin_lock(&ses->ses_lock);
  3684. if (ses->ses_status == SES_IN_SETUP)
  3685. ses->ses_status = SES_NEED_RECON;
  3686. spin_lock(&ses->chan_lock);
  3687. cifs_chan_clear_in_reconnect(ses, server);
  3688. spin_unlock(&ses->chan_lock);
  3689. spin_unlock(&ses->ses_lock);
  3690. } else {
  3691. spin_lock(&ses->ses_lock);
  3692. if (ses->ses_status == SES_IN_SETUP)
  3693. ses->ses_status = SES_GOOD;
  3694. spin_lock(&ses->chan_lock);
  3695. cifs_chan_clear_in_reconnect(ses, server);
  3696. cifs_chan_clear_need_reconnect(ses, server);
  3697. spin_unlock(&ses->chan_lock);
  3698. spin_unlock(&ses->ses_lock);
  3699. }
  3700. return rc;
  3701. }
  3702. static int
  3703. cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
  3704. {
  3705. ctx->sectype = ses->sectype;
  3706. /* krb5 is special, since we don't need username or pw */
  3707. if (ctx->sectype == Kerberos)
  3708. return 0;
  3709. return cifs_set_cifscreds(ctx, ses);
  3710. }
  3711. static struct cifs_tcon *
  3712. cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
  3713. {
  3714. int rc;
  3715. struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
  3716. struct cifs_ses *ses;
  3717. struct cifs_tcon *tcon = NULL;
  3718. struct smb3_fs_context *ctx;
  3719. ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
  3720. if (ctx == NULL)
  3721. return ERR_PTR(-ENOMEM);
  3722. ctx->local_nls = cifs_sb->local_nls;
  3723. ctx->linux_uid = fsuid;
  3724. ctx->cred_uid = fsuid;
  3725. ctx->UNC = master_tcon->tree_name;
  3726. ctx->retry = master_tcon->retry;
  3727. ctx->nocase = master_tcon->nocase;
  3728. ctx->nohandlecache = master_tcon->nohandlecache;
  3729. ctx->local_lease = master_tcon->local_lease;
  3730. ctx->no_lease = master_tcon->no_lease;
  3731. ctx->resilient = master_tcon->use_resilient;
  3732. ctx->persistent = master_tcon->use_persistent;
  3733. ctx->handle_timeout = master_tcon->handle_timeout;
  3734. ctx->no_linux_ext = !master_tcon->unix_ext;
  3735. ctx->linux_ext = master_tcon->posix_extensions;
  3736. ctx->sectype = master_tcon->ses->sectype;
  3737. ctx->sign = master_tcon->ses->sign;
  3738. ctx->seal = master_tcon->seal;
  3739. ctx->witness = master_tcon->use_witness;
  3740. rc = cifs_set_vol_auth(ctx, master_tcon->ses);
  3741. if (rc) {
  3742. tcon = ERR_PTR(rc);
  3743. goto out;
  3744. }
  3745. /* get a reference for the same TCP session */
  3746. spin_lock(&cifs_tcp_ses_lock);
  3747. ++master_tcon->ses->server->srv_count;
  3748. spin_unlock(&cifs_tcp_ses_lock);
  3749. ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
  3750. if (IS_ERR(ses)) {
  3751. tcon = (struct cifs_tcon *)ses;
  3752. cifs_put_tcp_session(master_tcon->ses->server, 0);
  3753. goto out;
  3754. }
  3755. tcon = cifs_get_tcon(ses, ctx);
  3756. if (IS_ERR(tcon)) {
  3757. cifs_put_smb_ses(ses);
  3758. goto out;
  3759. }
  3760. #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
  3761. if (cap_unix(ses))
  3762. reset_cifs_unix_caps(0, tcon, NULL, ctx);
  3763. #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
  3764. out:
  3765. kfree(ctx->username);
  3766. kfree_sensitive(ctx->password);
  3767. kfree(ctx);
  3768. return tcon;
  3769. }
  3770. struct cifs_tcon *
  3771. cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
  3772. {
  3773. return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
  3774. }
  3775. /* find and return a tlink with given uid */
  3776. static struct tcon_link *
  3777. tlink_rb_search(struct rb_root *root, kuid_t uid)
  3778. {
  3779. struct rb_node *node = root->rb_node;
  3780. struct tcon_link *tlink;
  3781. while (node) {
  3782. tlink = rb_entry(node, struct tcon_link, tl_rbnode);
  3783. if (uid_gt(tlink->tl_uid, uid))
  3784. node = node->rb_left;
  3785. else if (uid_lt(tlink->tl_uid, uid))
  3786. node = node->rb_right;
  3787. else
  3788. return tlink;
  3789. }
  3790. return NULL;
  3791. }
  3792. /* insert a tcon_link into the tree */
  3793. static void
  3794. tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
  3795. {
  3796. struct rb_node **new = &(root->rb_node), *parent = NULL;
  3797. struct tcon_link *tlink;
  3798. while (*new) {
  3799. tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
  3800. parent = *new;
  3801. if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
  3802. new = &((*new)->rb_left);
  3803. else
  3804. new = &((*new)->rb_right);
  3805. }
  3806. rb_link_node(&new_tlink->tl_rbnode, parent, new);
  3807. rb_insert_color(&new_tlink->tl_rbnode, root);
  3808. }
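/*
 * The tlink tree is an rbtree of tcon_link structures keyed by tl_uid.  It
 * only really matters for multiuser mounts: cifs_sb_tlink() below looks up
 * (or constructs) a per-fsuid tcon so each user authenticates separately,
 * while non-multiuser mounts always take the master tlink.
 */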
  3809. /*
  3810. * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
  3811. * current task.
  3812. *
  3813. * If the superblock doesn't refer to a multiuser mount, then just return
  3814. * the master tcon for the mount.
  3815. *
  3816. * First, search the rbtree for an existing tcon for this fsuid. If one
  3817. * exists, then check to see if it's pending construction. If it is then wait
  3818. * for construction to complete. Once it's no longer pending, check to see if
  3819. * it failed and either return an error or retry construction, depending on
  3820. * the timeout.
  3821. *
  3822. * If one doesn't exist then insert a new tcon_link struct into the tree and
  3823. * try to construct a new one.
  3824. */
  3825. struct tcon_link *
  3826. cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
  3827. {
  3828. int ret;
  3829. kuid_t fsuid = current_fsuid();
  3830. struct tcon_link *tlink, *newtlink;
  3831. if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
  3832. return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
  3833. spin_lock(&cifs_sb->tlink_tree_lock);
  3834. tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
  3835. if (tlink)
  3836. cifs_get_tlink(tlink);
  3837. spin_unlock(&cifs_sb->tlink_tree_lock);
  3838. if (tlink == NULL) {
  3839. newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
  3840. if (newtlink == NULL)
  3841. return ERR_PTR(-ENOMEM);
  3842. newtlink->tl_uid = fsuid;
  3843. newtlink->tl_tcon = ERR_PTR(-EACCES);
  3844. set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
  3845. set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
  3846. cifs_get_tlink(newtlink);
  3847. spin_lock(&cifs_sb->tlink_tree_lock);
  3848. /* was one inserted after previous search? */
  3849. tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
  3850. if (tlink) {
  3851. cifs_get_tlink(tlink);
  3852. spin_unlock(&cifs_sb->tlink_tree_lock);
  3853. kfree(newtlink);
  3854. goto wait_for_construction;
  3855. }
  3856. tlink = newtlink;
  3857. tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
  3858. spin_unlock(&cifs_sb->tlink_tree_lock);
  3859. } else {
  3860. wait_for_construction:
  3861. ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
  3862. TASK_INTERRUPTIBLE);
  3863. if (ret) {
  3864. cifs_put_tlink(tlink);
  3865. return ERR_PTR(-ERESTARTSYS);
  3866. }
  3867. /* if it's good, return it */
  3868. if (!IS_ERR(tlink->tl_tcon))
  3869. return tlink;
  3870. /* return error if we tried this already recently */
  3871. if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
  3872. cifs_put_tlink(tlink);
  3873. return ERR_PTR(-EACCES);
  3874. }
  3875. if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
  3876. goto wait_for_construction;
  3877. }
  3878. tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
  3879. clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
  3880. wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);
  3881. if (IS_ERR(tlink->tl_tcon)) {
  3882. cifs_put_tlink(tlink);
  3883. return ERR_PTR(-EACCES);
  3884. }
  3885. return tlink;
  3886. }
  3887. /*
* periodic workqueue job that scans the tlink_tree of a superblock and
* closes out idle tcons.
  3890. */
  3891. static void
  3892. cifs_prune_tlinks(struct work_struct *work)
  3893. {
  3894. struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
  3895. prune_tlinks.work);
  3896. struct rb_root *root = &cifs_sb->tlink_tree;
  3897. struct rb_node *node;
  3898. struct rb_node *tmp;
  3899. struct tcon_link *tlink;
  3900. /*
* Because we drop the spinlock in the loop in order to put the tlink,
* it's not guarded against removal of links from the tree. The only
  3903. * places that remove entries from the tree are this function and
  3904. * umounts. Because this function is non-reentrant and is canceled
  3905. * before umount can proceed, this is safe.
  3906. */
  3907. spin_lock(&cifs_sb->tlink_tree_lock);
  3908. node = rb_first(root);
  3909. while (node != NULL) {
  3910. tmp = node;
  3911. node = rb_next(tmp);
  3912. tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
  3913. if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
  3914. atomic_read(&tlink->tl_count) != 0 ||
  3915. time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
  3916. continue;
  3917. cifs_get_tlink(tlink);
  3918. clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
  3919. rb_erase(tmp, root);
  3920. spin_unlock(&cifs_sb->tlink_tree_lock);
  3921. cifs_put_tlink(tlink);
  3922. spin_lock(&cifs_sb->tlink_tree_lock);
  3923. }
  3924. spin_unlock(&cifs_sb->tlink_tree_lock);
  3925. queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
  3926. TLINK_IDLE_EXPIRE);
  3927. }
  3928. #ifdef CONFIG_CIFS_DFS_UPCALL
  3929. /* Update dfs referral path of superblock */
  3930. static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
  3931. const char *target)
  3932. {
  3933. int rc = 0;
  3934. size_t len = strlen(target);
  3935. char *refpath, *npath;
  3936. if (unlikely(len < 2 || *target != '\\'))
  3937. return -EINVAL;
  3938. if (target[1] == '\\') {
  3939. len += 1;
  3940. refpath = kmalloc(len, GFP_KERNEL);
  3941. if (!refpath)
  3942. return -ENOMEM;
  3943. scnprintf(refpath, len, "%s", target);
  3944. } else {
  3945. len += sizeof("\\");
  3946. refpath = kmalloc(len, GFP_KERNEL);
  3947. if (!refpath)
  3948. return -ENOMEM;
  3949. scnprintf(refpath, len, "\\%s", target);
  3950. }
  3951. npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
  3952. kfree(refpath);
  3953. if (IS_ERR(npath)) {
  3954. rc = PTR_ERR(npath);
  3955. } else {
  3956. mutex_lock(&server->refpath_lock);
  3957. kfree(server->leaf_fullpath);
  3958. server->leaf_fullpath = npath;
  3959. mutex_unlock(&server->refpath_lock);
  3960. server->current_fullpath = server->leaf_fullpath;
  3961. }
  3962. return rc;
  3963. }
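/*
 * Example (hypothetical targets): "\srv2\share2" is normalized to
 * "\\srv2\share2" before canonicalization, while a target that already
 * starts with a double backslash is copied as-is; the canonical result then
 * replaces server->leaf_fullpath and is used as the new current_fullpath.
 */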
  3964. static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
  3965. size_t tcp_host_len, char *share, bool *target_match)
  3966. {
  3967. int rc = 0;
  3968. const char *dfs_host;
  3969. size_t dfs_host_len;
  3970. *target_match = true;
  3971. extract_unc_hostname(share, &dfs_host, &dfs_host_len);
  3972. /* Check if hostnames or addresses match */
  3973. if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
  3974. cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
  3975. dfs_host, (int)tcp_host_len, tcp_host);
  3976. rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
  3977. if (rc)
  3978. cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
  3979. }
  3980. return rc;
  3981. }
  3982. static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
  3983. struct cifs_sb_info *cifs_sb, char *tree, bool islink,
  3984. struct dfs_cache_tgt_list *tl)
  3985. {
  3986. int rc;
  3987. struct TCP_Server_Info *server = tcon->ses->server;
  3988. const struct smb_version_operations *ops = server->ops;
  3989. struct cifs_tcon *ipc = tcon->ses->tcon_ipc;
  3990. char *share = NULL, *prefix = NULL;
  3991. const char *tcp_host;
  3992. size_t tcp_host_len;
  3993. struct dfs_cache_tgt_iterator *tit;
  3994. bool target_match;
  3995. extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
  3996. tit = dfs_cache_get_tgt_iterator(tl);
  3997. if (!tit) {
  3998. rc = -ENOENT;
  3999. goto out;
  4000. }
  4001. /* Try to tree connect to all dfs targets */
  4002. for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
  4003. const char *target = dfs_cache_get_tgt_name(tit);
  4004. struct dfs_cache_tgt_list ntl = DFS_CACHE_TGT_LIST_INIT(ntl);
  4005. kfree(share);
  4006. kfree(prefix);
  4007. share = prefix = NULL;
  4008. /* Check if share matches with tcp ses */
  4009. rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
  4010. if (rc) {
  4011. cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
  4012. break;
  4013. }
  4014. rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
  4015. &target_match);
  4016. if (rc)
  4017. break;
  4018. if (!target_match) {
  4019. rc = -EHOSTUNREACH;
  4020. continue;
  4021. }
  4022. if (ipc->need_reconnect) {
  4023. scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
  4024. rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
  4025. if (rc)
  4026. break;
  4027. }
  4028. scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
  4029. if (!islink) {
  4030. rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
  4031. break;
  4032. }
  4033. /*
  4034. * If no dfs referrals were returned from link target, then just do a TREE_CONNECT
* to it. Otherwise, cache the dfs referral and then mark the current tcp ses for
* reconnect so that either the demultiplex thread or the echo worker will reconnect to
* the newly resolved target.
  4038. */
  4039. if (dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
  4040. NULL, &ntl)) {
  4041. rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
  4042. if (rc)
  4043. continue;
  4044. rc = dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
  4045. if (!rc)
  4046. rc = cifs_update_super_prepath(cifs_sb, prefix);
  4047. } else {
  4048. /* Target is another dfs share */
  4049. rc = update_server_fullpath(server, cifs_sb, target);
  4050. dfs_cache_free_tgts(tl);
  4051. if (!rc) {
  4052. rc = -EREMOTE;
  4053. list_replace_init(&ntl.tl_list, &tl->tl_list);
  4054. } else
  4055. dfs_cache_free_tgts(&ntl);
  4056. }
  4057. break;
  4058. }
  4059. out:
  4060. kfree(share);
  4061. kfree(prefix);
  4062. return rc;
  4063. }
static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb, char *tree, bool islink,
				   struct dfs_cache_tgt_list *tl)
{
	int rc;
	int num_links = 0;
	struct TCP_Server_Info *server = tcon->ses->server;

	do {
		rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
		if (!rc || rc != -EREMOTE)
			break;
	} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
	/*
	 * If we couldn't tree connect to any targets from last referral path, then retry from
	 * original referral path.
	 */
	if (rc && server->current_fullpath != server->origin_fullpath) {
		server->current_fullpath = server->origin_fullpath;
		cifs_signal_cifsd_for_reconnect(server, true);
	}

	dfs_cache_free_tgts(tl);
	return rc;
}
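
/*
 * Reconnect the tree connection of an established session.  Only one tree
 * connect is issued per reconnect: the tcon must be TID_NEW or
 * TID_NEED_TCON and is moved to TID_IN_TCON while the request is in flight.
 * IPC tcons reconnect to \\<hostname>\IPC$ directly; for regular shares,
 * a cached DFS referral (if any) is chased via tree_connect_dfs_target(),
 * otherwise the connect simply targets tcon->tree_name.
 */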
int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
{
	int rc;
	struct TCP_Server_Info *server = tcon->ses->server;
	const struct smb_version_operations *ops = server->ops;
	struct super_block *sb = NULL;
	struct cifs_sb_info *cifs_sb;
	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
	char *tree;
	struct dfs_info3_param ref = {0};

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->ses->ses_status != SES_GOOD ||
	    (tcon->status != TID_NEW &&
	    tcon->status != TID_NEED_TCON)) {
		spin_unlock(&tcon->tc_lock);
		return 0;
	}
	tcon->status = TID_IN_TCON;
	spin_unlock(&tcon->tc_lock);

	tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
	if (!tree) {
		rc = -ENOMEM;
		goto out;
	}

	if (tcon->ipc) {
		scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
		rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
		goto out;
	}

	sb = cifs_get_tcp_super(server);
	if (IS_ERR(sb)) {
		rc = PTR_ERR(sb);
		cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
		goto out;
	}

	cifs_sb = CIFS_SB(sb);

	/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
	if (!server->current_fullpath ||
	    dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
		goto out;
	}

	rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK,
				     &tl);
	free_dfs_info_param(&ref);

out:
	kfree(tree);
	cifs_put_tcp_super(sb);

	if (rc) {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_NEED_TCON;
		spin_unlock(&tcon->tc_lock);
	} else {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_GOOD;
		spin_unlock(&tcon->tc_lock);
		tcon->need_reconnect = false;
	}

	return rc;
}
#else
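/*
 * Fallback used when DFS referral support is not built in: the same
 * tcon->status state machine as above, but the tree connect always goes to
 * tcon->tree_name with no referral handling.
 */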
int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
{
	int rc;
	const struct smb_version_operations *ops = tcon->ses->server->ops;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->ses->ses_status != SES_GOOD ||
	    (tcon->status != TID_NEW &&
	    tcon->status != TID_NEED_TCON)) {
		spin_unlock(&tcon->tc_lock);
		return 0;
	}
	tcon->status = TID_IN_TCON;
	spin_unlock(&tcon->tc_lock);

	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, nlsc);
	if (rc) {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_NEED_TCON;
		spin_unlock(&tcon->tc_lock);
	} else {
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_GOOD;
		tcon->need_reconnect = false;
		spin_unlock(&tcon->tc_lock);
	}

	return rc;
}
#endif