nfs4proc.c

/*
 * fs/nfs/nfs4proc.c
 *
 * Client-side procedure declarations for NFSv4.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <[email protected]>
 * Andy Adamson <[email protected]>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/iversion.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "callback.h"
#include "pnfs.h"
#include "netns.h"
#include "sysfs.h"
#include "nfs4idmap.h"
#include "nfs4session.h"
#include "fscache.h"
#include "nfs42.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY         NFSDBG_PROC

#define NFS4_BITMASK_SZ         3

#define NFS4_POLL_RETRY_MIN     (HZ/10)
#define NFS4_POLL_RETRY_MAX     (15*HZ)

/* file attributes which can be mapped to nfs attributes */
#define NFS4_VALID_ATTRS (ATTR_MODE \
        | ATTR_UID \
        | ATTR_GID \
        | ATTR_SIZE \
        | ATTR_ATIME \
        | ATTR_MTIME \
        | ATTR_CTIME \
        | ATTR_ATIME_SET \
        | ATTR_MTIME_SET)

struct nfs4_opendata;
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
                struct nfs_fattr *fattr, struct inode *inode);
static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
                struct nfs_fattr *fattr, struct iattr *sattr,
                struct nfs_open_context *ctx, struct nfs4_label *ilabel);
#ifdef CONFIG_NFS_V4_1
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
                const struct cred *cred,
                struct nfs4_slot *slot,
                bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
                const struct cred *);
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
                const struct cred *, bool);
#endif

#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
        struct iattr *sattr, struct nfs4_label *label)
{
        int err;

        if (label == NULL)
                return NULL;

        if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
                return NULL;

        label->lfs = 0;
        label->pi = 0;
        label->len = 0;
        label->label = NULL;

        err = security_dentry_init_security(dentry, sattr->ia_mode,
                                &dentry->d_name, NULL,
                                (void **)&label->label, &label->len);
        if (err == 0)
                return label;

        return NULL;
}

static inline void
nfs4_label_release_security(struct nfs4_label *label)
{
        if (label)
                security_release_secctx(label->label, label->len);
}

static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{
        if (label)
                return server->attr_bitmask;

        return server->attr_bitmask_nl;
}
#else
static inline struct nfs4_label *
nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
        struct iattr *sattr, struct nfs4_label *l)
{ return NULL; }
static inline void
nfs4_label_release_security(struct nfs4_label *label)
{ return; }
static inline u32 *
nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
{ return server->attr_bitmask; }
#endif

/* Prevent leaks of NFSv4 errors into userland */
static int nfs4_map_errors(int err)
{
        if (err >= -1000)
                return err;
        switch (err) {
        case -NFS4ERR_RESOURCE:
        case -NFS4ERR_LAYOUTTRYLATER:
        case -NFS4ERR_RECALLCONFLICT:
                return -EREMOTEIO;
        case -NFS4ERR_WRONGSEC:
        case -NFS4ERR_WRONG_CRED:
                return -EPERM;
        case -NFS4ERR_BADOWNER:
        case -NFS4ERR_BADNAME:
                return -EINVAL;
        case -NFS4ERR_SHARE_DENIED:
                return -EACCES;
        case -NFS4ERR_MINOR_VERS_MISMATCH:
                return -EPROTONOSUPPORT;
        case -NFS4ERR_FILE_OPEN:
                return -EBUSY;
        case -NFS4ERR_NOT_SAME:
                return -ENOTSYNC;
        default:
                dprintk("%s could not handle NFSv4 error %d\n",
                                __func__, -err);
                break;
        }
        return -EIO;
}

/*
 * This is our standard bitmap for GETATTR requests.
 */
const u32 nfs4_fattr_bitmap[3] = {
        FATTR4_WORD0_TYPE
        | FATTR4_WORD0_CHANGE
        | FATTR4_WORD0_SIZE
        | FATTR4_WORD0_FSID
        | FATTR4_WORD0_FILEID,
        FATTR4_WORD1_MODE
        | FATTR4_WORD1_NUMLINKS
        | FATTR4_WORD1_OWNER
        | FATTR4_WORD1_OWNER_GROUP
        | FATTR4_WORD1_RAWDEV
        | FATTR4_WORD1_SPACE_USED
        | FATTR4_WORD1_TIME_ACCESS
        | FATTR4_WORD1_TIME_METADATA
        | FATTR4_WORD1_TIME_MODIFY
        | FATTR4_WORD1_MOUNTED_ON_FILEID,
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
        FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_pnfs_open_bitmap[3] = {
        FATTR4_WORD0_TYPE
        | FATTR4_WORD0_CHANGE
        | FATTR4_WORD0_SIZE
        | FATTR4_WORD0_FSID
        | FATTR4_WORD0_FILEID,
        FATTR4_WORD1_MODE
        | FATTR4_WORD1_NUMLINKS
        | FATTR4_WORD1_OWNER
        | FATTR4_WORD1_OWNER_GROUP
        | FATTR4_WORD1_RAWDEV
        | FATTR4_WORD1_SPACE_USED
        | FATTR4_WORD1_TIME_ACCESS
        | FATTR4_WORD1_TIME_METADATA
        | FATTR4_WORD1_TIME_MODIFY,
        FATTR4_WORD2_MDSTHRESHOLD
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
        | FATTR4_WORD2_SECURITY_LABEL
#endif
};

static const u32 nfs4_open_noattr_bitmap[3] = {
        FATTR4_WORD0_TYPE
        | FATTR4_WORD0_FILEID,
};

const u32 nfs4_statfs_bitmap[3] = {
        FATTR4_WORD0_FILES_AVAIL
        | FATTR4_WORD0_FILES_FREE
        | FATTR4_WORD0_FILES_TOTAL,
        FATTR4_WORD1_SPACE_AVAIL
        | FATTR4_WORD1_SPACE_FREE
        | FATTR4_WORD1_SPACE_TOTAL
};

const u32 nfs4_pathconf_bitmap[3] = {
        FATTR4_WORD0_MAXLINK
        | FATTR4_WORD0_MAXNAME,
        0
};

const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
                        | FATTR4_WORD0_MAXREAD
                        | FATTR4_WORD0_MAXWRITE
                        | FATTR4_WORD0_LEASE_TIME,
                        FATTR4_WORD1_TIME_DELTA
                        | FATTR4_WORD1_FS_LAYOUT_TYPES,
                        FATTR4_WORD2_LAYOUT_BLKSIZE
                        | FATTR4_WORD2_CLONE_BLKSIZE
                        | FATTR4_WORD2_CHANGE_ATTR_TYPE
                        | FATTR4_WORD2_XATTR_SUPPORT
};

const u32 nfs4_fs_locations_bitmap[3] = {
        FATTR4_WORD0_CHANGE
        | FATTR4_WORD0_SIZE
        | FATTR4_WORD0_FSID
        | FATTR4_WORD0_FILEID
        | FATTR4_WORD0_FS_LOCATIONS,
        FATTR4_WORD1_OWNER
        | FATTR4_WORD1_OWNER_GROUP
        | FATTR4_WORD1_RAWDEV
        | FATTR4_WORD1_SPACE_USED
        | FATTR4_WORD1_TIME_ACCESS
        | FATTR4_WORD1_TIME_METADATA
        | FATTR4_WORD1_TIME_MODIFY
        | FATTR4_WORD1_MOUNTED_ON_FILEID,
};

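/*
 * Copy a GETATTR bitmask and trim it when the inode holds a read
 * delegation: attributes the client controls locally (change, size,
 * mode, owner/group) are requested from the server only if the
 * corresponding NFS_INO_INVALID_* flag marks the cached value stale.
 */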
static void nfs4_bitmap_copy_adjust(__u32 *dst, const __u32 *src,
                struct inode *inode, unsigned long flags)
{
        unsigned long cache_validity;

        memcpy(dst, src, NFS4_BITMASK_SZ*sizeof(*dst));
        if (!inode || !nfs4_have_delegation(inode, FMODE_READ))
                return;

        cache_validity = READ_ONCE(NFS_I(inode)->cache_validity) | flags;

        /* Remove the attributes over which we have full control */
        dst[1] &= ~FATTR4_WORD1_RAWDEV;
        if (!(cache_validity & NFS_INO_INVALID_SIZE))
                dst[0] &= ~FATTR4_WORD0_SIZE;

        if (!(cache_validity & NFS_INO_INVALID_CHANGE))
                dst[0] &= ~FATTR4_WORD0_CHANGE;

        if (!(cache_validity & NFS_INO_INVALID_MODE))
                dst[1] &= ~FATTR4_WORD1_MODE;

        if (!(cache_validity & NFS_INO_INVALID_OTHER))
                dst[1] &= ~(FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP);
}

static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
                struct nfs4_readdir_arg *readdir)
{
        unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
        __be32 *start, *p;

        if (cookie > 2) {
                readdir->cookie = cookie;
                memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
                return;
        }

        readdir->cookie = 0;
        memset(&readdir->verifier, 0, sizeof(readdir->verifier));
        if (cookie == 2)
                return;

        /*
         * NFSv4 servers do not return entries for '.' and '..'
         * Therefore, we fake these entries here.  We let '.'
         * have cookie 0 and '..' have cookie 1.  Note that
         * when talking to the server, we always send cookie 0
         * instead of 1 or 2.
         */
        start = p = kmap_atomic(*readdir->pages);

        if (cookie == 0) {
                *p++ = xdr_one;                 /* next */
                *p++ = xdr_zero;                /* cookie, first word */
                *p++ = xdr_one;                 /* cookie, second word */
                *p++ = xdr_one;                 /* entry len */
                memcpy(p, ".\0\0\0", 4);        /* entry */
                p++;
                *p++ = xdr_one;                 /* bitmap length */
                *p++ = htonl(attrs);            /* bitmap */
                *p++ = htonl(12);               /* attribute buffer length */
                *p++ = htonl(NF4DIR);
                p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
        }

        *p++ = xdr_one;                         /* next */
        *p++ = xdr_zero;                        /* cookie, first word */
        *p++ = xdr_two;                         /* cookie, second word */
        *p++ = xdr_two;                         /* entry len */
        memcpy(p, "..\0\0", 4);                 /* entry */
        p++;
        *p++ = xdr_one;                         /* bitmap length */
        *p++ = htonl(attrs);                    /* bitmap */
        *p++ = htonl(12);                       /* attribute buffer length */
        *p++ = htonl(NF4DIR);
        p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));

        readdir->pgbase = (char *)p - (char *)start;
        readdir->count -= readdir->pgbase;
        kunmap_atomic(start);
}

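/*
 * Record "version" as the pre-operation change attribute, but only if
 * the server has not already supplied one, so that later cache
 * revalidation sees a consistent before/after pair.
 */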
static void nfs4_fattr_set_prechange(struct nfs_fattr *fattr, u64 version)
{
        if (!(fattr->valid & NFS_ATTR_FATTR_PRECHANGE)) {
                fattr->pre_change_attr = version;
                fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
        }
}

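/*
 * Ask the minor-version specific code to test a stateid and free it if
 * the server reports it as expired or revoked.  The "revoked" variants
 * below first mark the stateid as NFS4_REVOKED_STATEID_TYPE, working on
 * a private copy when the caller passes a const stateid.
 */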
static void nfs4_test_and_free_stateid(struct nfs_server *server,
                nfs4_stateid *stateid,
                const struct cred *cred)
{
        const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;

        ops->test_and_free_expired(server, stateid, cred);
}

static void __nfs4_free_revoked_stateid(struct nfs_server *server,
                nfs4_stateid *stateid,
                const struct cred *cred)
{
        stateid->type = NFS4_REVOKED_STATEID_TYPE;
        nfs4_test_and_free_stateid(server, stateid, cred);
}

static void nfs4_free_revoked_stateid(struct nfs_server *server,
                const nfs4_stateid *stateid,
                const struct cred *cred)
{
        nfs4_stateid tmp;

        nfs4_stateid_copy(&tmp, stateid);
        __nfs4_free_revoked_stateid(server, &tmp, cred);
}

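/*
 * Exponential back-off for retryable errors such as NFS4ERR_DELAY:
 * return the current timeout and double it for the next attempt,
 * clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX].  For example,
 * successive calls yield roughly HZ/10, HZ/5, 2*HZ/5, ... up to the
 * 15*HZ ceiling.  The helpers below sleep for that interval in either
 * killable or fully interruptible mode.
 */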
  363. static long nfs4_update_delay(long *timeout)
  364. {
  365. long ret;
  366. if (!timeout)
  367. return NFS4_POLL_RETRY_MAX;
  368. if (*timeout <= 0)
  369. *timeout = NFS4_POLL_RETRY_MIN;
  370. if (*timeout > NFS4_POLL_RETRY_MAX)
  371. *timeout = NFS4_POLL_RETRY_MAX;
  372. ret = *timeout;
  373. *timeout <<= 1;
  374. return ret;
  375. }
  376. static int nfs4_delay_killable(long *timeout)
  377. {
  378. might_sleep();
  379. __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
  380. schedule_timeout(nfs4_update_delay(timeout));
  381. if (!__fatal_signal_pending(current))
  382. return 0;
  383. return -EINTR;
  384. }
  385. static int nfs4_delay_interruptible(long *timeout)
  386. {
  387. might_sleep();
  388. __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
  389. schedule_timeout(nfs4_update_delay(timeout));
  390. if (!signal_pending(current))
  391. return 0;
392. return __fatal_signal_pending(current) ? -EINTR : -ERESTARTSYS;
  393. }
  394. static int nfs4_delay(long *timeout, bool interruptible)
  395. {
  396. if (interruptible)
  397. return nfs4_delay_interruptible(timeout);
  398. return nfs4_delay_killable(timeout);
  399. }
  400. static const nfs4_stateid *
  401. nfs4_recoverable_stateid(const nfs4_stateid *stateid)
  402. {
  403. if (!stateid)
  404. return NULL;
  405. switch (stateid->type) {
  406. case NFS4_OPEN_STATEID_TYPE:
  407. case NFS4_LOCK_STATEID_TYPE:
  408. case NFS4_DELEGATION_STATEID_TYPE:
  409. return stateid;
  410. default:
  411. break;
  412. }
  413. return NULL;
  414. }
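/*
 * Note (added for clarity): only open, lock and delegation stateids are
 * treated as recoverable here; any other stateid type (a revoked one, for
 * example) makes the exception handler below behave as if no stateid had
 * been supplied at all.
 */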
  415. /* This is the error handling routine for processes that are allowed
  416. * to sleep.
  417. */
  418. static int nfs4_do_handle_exception(struct nfs_server *server,
  419. int errorcode, struct nfs4_exception *exception)
  420. {
  421. struct nfs_client *clp = server->nfs_client;
  422. struct nfs4_state *state = exception->state;
  423. const nfs4_stateid *stateid;
  424. struct inode *inode = exception->inode;
  425. int ret = errorcode;
  426. exception->delay = 0;
  427. exception->recovering = 0;
  428. exception->retry = 0;
  429. stateid = nfs4_recoverable_stateid(exception->stateid);
  430. if (stateid == NULL && state != NULL)
  431. stateid = nfs4_recoverable_stateid(&state->stateid);
  432. switch(errorcode) {
  433. case 0:
  434. return 0;
  435. case -NFS4ERR_BADHANDLE:
  436. case -ESTALE:
  437. if (inode != NULL && S_ISREG(inode->i_mode))
  438. pnfs_destroy_layout(NFS_I(inode));
  439. break;
  440. case -NFS4ERR_DELEG_REVOKED:
  441. case -NFS4ERR_ADMIN_REVOKED:
  442. case -NFS4ERR_EXPIRED:
  443. case -NFS4ERR_BAD_STATEID:
  444. case -NFS4ERR_PARTNER_NO_AUTH:
  445. if (inode != NULL && stateid != NULL) {
  446. nfs_inode_find_state_and_recover(inode,
  447. stateid);
  448. goto wait_on_recovery;
  449. }
  450. fallthrough;
  451. case -NFS4ERR_OPENMODE:
  452. if (inode) {
  453. int err;
  454. err = nfs_async_inode_return_delegation(inode,
  455. stateid);
  456. if (err == 0)
  457. goto wait_on_recovery;
  458. if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
  459. exception->retry = 1;
  460. break;
  461. }
  462. }
  463. if (state == NULL)
  464. break;
  465. ret = nfs4_schedule_stateid_recovery(server, state);
  466. if (ret < 0)
  467. break;
  468. goto wait_on_recovery;
  469. case -NFS4ERR_STALE_STATEID:
  470. case -NFS4ERR_STALE_CLIENTID:
  471. nfs4_schedule_lease_recovery(clp);
  472. goto wait_on_recovery;
  473. case -NFS4ERR_MOVED:
  474. ret = nfs4_schedule_migration_recovery(server);
  475. if (ret < 0)
  476. break;
  477. goto wait_on_recovery;
  478. case -NFS4ERR_LEASE_MOVED:
  479. nfs4_schedule_lease_moved_recovery(clp);
  480. goto wait_on_recovery;
  481. #if defined(CONFIG_NFS_V4_1)
  482. case -NFS4ERR_BADSESSION:
  483. case -NFS4ERR_BADSLOT:
  484. case -NFS4ERR_BAD_HIGH_SLOT:
  485. case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
  486. case -NFS4ERR_DEADSESSION:
  487. case -NFS4ERR_SEQ_FALSE_RETRY:
  488. case -NFS4ERR_SEQ_MISORDERED:
  489. /* Handled in nfs41_sequence_process() */
  490. goto wait_on_recovery;
  491. #endif /* defined(CONFIG_NFS_V4_1) */
  492. case -NFS4ERR_FILE_OPEN:
  493. if (exception->timeout > HZ) {
  494. /* We have retried a decent amount, time to
  495. * fail
  496. */
  497. ret = -EBUSY;
  498. break;
  499. }
  500. fallthrough;
  501. case -NFS4ERR_DELAY:
  502. nfs_inc_server_stats(server, NFSIOS_DELAY);
  503. fallthrough;
  504. case -NFS4ERR_GRACE:
  505. case -NFS4ERR_LAYOUTTRYLATER:
  506. case -NFS4ERR_RECALLCONFLICT:
  507. exception->delay = 1;
  508. return 0;
  509. case -NFS4ERR_RETRY_UNCACHED_REP:
  510. case -NFS4ERR_OLD_STATEID:
  511. exception->retry = 1;
  512. break;
  513. case -NFS4ERR_BADOWNER:
  514. /* The following works around a Linux server bug! */
  515. case -NFS4ERR_BADNAME:
  516. if (server->caps & NFS_CAP_UIDGID_NOMAP) {
  517. server->caps &= ~NFS_CAP_UIDGID_NOMAP;
  518. exception->retry = 1;
  519. printk(KERN_WARNING "NFS: v4 server %s "
  520. "does not accept raw "
  521. "uid/gids. "
  522. "Reenabling the idmapper.\n",
  523. server->nfs_client->cl_hostname);
  524. }
  525. }
  526. /* We failed to handle the error */
  527. return nfs4_map_errors(ret);
  528. wait_on_recovery:
  529. exception->recovering = 1;
  530. return 0;
  531. }
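/*
 * Summary (added, not part of the original source): this routine never
 * sleeps itself; it only classifies the error. The sync and async
 * wrappers below inspect exception->delay (back off before retrying),
 * exception->recovering (wait for state or lease recovery) and
 * exception->retry (retry straight away); when neither delay nor recovery
 * applies, nfs4_map_errors() translates whatever is left into an errno
 * for the caller, with exception->retry indicating whether to try again.
 */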
  532. /* This is the error handling routine for processes that are allowed
  533. * to sleep.
  534. */
  535. int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
  536. {
  537. struct nfs_client *clp = server->nfs_client;
  538. int ret;
  539. ret = nfs4_do_handle_exception(server, errorcode, exception);
  540. if (exception->delay) {
  541. ret = nfs4_delay(&exception->timeout,
  542. exception->interruptible);
  543. goto out_retry;
  544. }
  545. if (exception->recovering) {
  546. if (exception->task_is_privileged)
  547. return -EDEADLOCK;
  548. ret = nfs4_wait_clnt_recover(clp);
  549. if (test_bit(NFS_MIG_FAILED, &server->mig_status))
  550. return -EIO;
  551. goto out_retry;
  552. }
  553. return ret;
  554. out_retry:
  555. if (ret == 0)
  556. exception->retry = 1;
  557. return ret;
  558. }
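/*
 * Usage note (added): synchronous callers typically drive this in a retry
 * loop, roughly as nfs4_do_open_reclaim() below does:
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_do_something(...);	// placeholder name
 *		if (err != -NFS4ERR_DELAY)
 *			break;
 *		nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *
 * (sketch only; real callers differ in which errors they filter first.)
 */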
  559. static int
  560. nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
  561. int errorcode, struct nfs4_exception *exception)
  562. {
  563. struct nfs_client *clp = server->nfs_client;
  564. int ret;
  565. ret = nfs4_do_handle_exception(server, errorcode, exception);
  566. if (exception->delay) {
  567. rpc_delay(task, nfs4_update_delay(&exception->timeout));
  568. goto out_retry;
  569. }
  570. if (exception->recovering) {
  571. if (exception->task_is_privileged)
  572. return -EDEADLOCK;
  573. rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
  574. if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
  575. rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
  576. goto out_retry;
  577. }
  578. if (test_bit(NFS_MIG_FAILED, &server->mig_status))
  579. ret = -EIO;
  580. return ret;
  581. out_retry:
  582. if (ret == 0) {
  583. exception->retry = 1;
  584. /*
  585. * For NFS4ERR_MOVED, the client transport will need to
  586. * be recomputed after migration recovery has completed.
  587. */
  588. if (errorcode == -NFS4ERR_MOVED)
  589. rpc_task_release_transport(task);
  590. }
  591. return ret;
  592. }
  593. int
  594. nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
  595. struct nfs4_state *state, long *timeout)
  596. {
  597. struct nfs4_exception exception = {
  598. .state = state,
  599. };
  600. if (task->tk_status >= 0)
  601. return 0;
  602. if (timeout)
  603. exception.timeout = *timeout;
  604. task->tk_status = nfs4_async_handle_exception(task, server,
  605. task->tk_status,
  606. &exception);
  607. if (exception.delay && timeout)
  608. *timeout = exception.timeout;
  609. if (exception.retry)
  610. return -EAGAIN;
  611. return 0;
  612. }
  613. /*
  614. * Return 'true' if 'clp' is using an rpc_client that is integrity protected
  615. * or 'false' otherwise.
  616. */
  617. static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
  618. {
  619. rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
  620. return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
  621. }
  622. static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
  623. {
  624. spin_lock(&clp->cl_lock);
625. if (time_before(clp->cl_last_renewal, timestamp))
  626. clp->cl_last_renewal = timestamp;
  627. spin_unlock(&clp->cl_lock);
  628. }
  629. static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
  630. {
  631. struct nfs_client *clp = server->nfs_client;
  632. if (!nfs4_has_session(clp))
  633. do_renew_lease(clp, timestamp);
  634. }
  635. struct nfs4_call_sync_data {
  636. const struct nfs_server *seq_server;
  637. struct nfs4_sequence_args *seq_args;
  638. struct nfs4_sequence_res *seq_res;
  639. };
  640. void nfs4_init_sequence(struct nfs4_sequence_args *args,
  641. struct nfs4_sequence_res *res, int cache_reply,
  642. int privileged)
  643. {
  644. args->sa_slot = NULL;
  645. args->sa_cache_this = cache_reply;
  646. args->sa_privileged = privileged;
  647. res->sr_slot = NULL;
  648. }
  649. static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
  650. {
  651. struct nfs4_slot *slot = res->sr_slot;
  652. struct nfs4_slot_table *tbl;
  653. tbl = slot->table;
  654. spin_lock(&tbl->slot_tbl_lock);
  655. if (!nfs41_wake_and_assign_slot(tbl, slot))
  656. nfs4_free_slot(tbl, slot);
  657. spin_unlock(&tbl->slot_tbl_lock);
  658. res->sr_slot = NULL;
  659. }
  660. static int nfs40_sequence_done(struct rpc_task *task,
  661. struct nfs4_sequence_res *res)
  662. {
  663. if (res->sr_slot != NULL)
  664. nfs40_sequence_free_slot(res);
  665. return 1;
  666. }
  667. #if defined(CONFIG_NFS_V4_1)
  668. static void nfs41_release_slot(struct nfs4_slot *slot)
  669. {
  670. struct nfs4_session *session;
  671. struct nfs4_slot_table *tbl;
  672. bool send_new_highest_used_slotid = false;
  673. if (!slot)
  674. return;
  675. tbl = slot->table;
  676. session = tbl->session;
  677. /* Bump the slot sequence number */
  678. if (slot->seq_done)
  679. slot->seq_nr++;
  680. slot->seq_done = 0;
  681. spin_lock(&tbl->slot_tbl_lock);
  682. /* Be nice to the server: try to ensure that the last transmitted
683. * value for highest_used_slotid <= target_highest_slotid
  684. */
  685. if (tbl->highest_used_slotid > tbl->target_highest_slotid)
  686. send_new_highest_used_slotid = true;
  687. if (nfs41_wake_and_assign_slot(tbl, slot)) {
  688. send_new_highest_used_slotid = false;
  689. goto out_unlock;
  690. }
  691. nfs4_free_slot(tbl, slot);
  692. if (tbl->highest_used_slotid != NFS4_NO_SLOT)
  693. send_new_highest_used_slotid = false;
  694. out_unlock:
  695. spin_unlock(&tbl->slot_tbl_lock);
  696. if (send_new_highest_used_slotid)
  697. nfs41_notify_server(session->clp);
  698. if (waitqueue_active(&tbl->slot_waitq))
  699. wake_up_all(&tbl->slot_waitq);
  700. }
  701. static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
  702. {
  703. nfs41_release_slot(res->sr_slot);
  704. res->sr_slot = NULL;
  705. }
  706. static void nfs4_slot_sequence_record_sent(struct nfs4_slot *slot,
  707. u32 seqnr)
  708. {
  709. if ((s32)(seqnr - slot->seq_nr_highest_sent) > 0)
  710. slot->seq_nr_highest_sent = seqnr;
  711. }
  712. static void nfs4_slot_sequence_acked(struct nfs4_slot *slot, u32 seqnr)
  713. {
  714. nfs4_slot_sequence_record_sent(slot, seqnr);
  715. slot->seq_nr_last_acked = seqnr;
  716. }
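/*
 * Note (added for clarity): the two helpers above track the highest slot
 * sequence number we have transmitted and the last one the server has
 * acknowledged. The (s32) subtraction in nfs4_slot_sequence_record_sent()
 * is a serial-number style comparison, so the bookkeeping stays correct
 * even if the 32-bit sequence counter wraps around.
 */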
  717. static void nfs4_probe_sequence(struct nfs_client *client, const struct cred *cred,
  718. struct nfs4_slot *slot)
  719. {
  720. struct rpc_task *task = _nfs41_proc_sequence(client, cred, slot, true);
  721. if (!IS_ERR(task))
  722. rpc_put_task_async(task);
  723. }
  724. static int nfs41_sequence_process(struct rpc_task *task,
  725. struct nfs4_sequence_res *res)
  726. {
  727. struct nfs4_session *session;
  728. struct nfs4_slot *slot = res->sr_slot;
  729. struct nfs_client *clp;
  730. int status;
  731. int ret = 1;
  732. if (slot == NULL)
  733. goto out_noaction;
  734. /* don't increment the sequence number if the task wasn't sent */
  735. if (!RPC_WAS_SENT(task) || slot->seq_done)
  736. goto out;
  737. session = slot->table->session;
  738. clp = session->clp;
  739. trace_nfs4_sequence_done(session, res);
  740. status = res->sr_status;
  741. if (task->tk_status == -NFS4ERR_DEADSESSION)
  742. status = -NFS4ERR_DEADSESSION;
  743. /* Check the SEQUENCE operation status */
  744. switch (status) {
  745. case 0:
  746. /* Mark this sequence number as having been acked */
  747. nfs4_slot_sequence_acked(slot, slot->seq_nr);
  748. /* Update the slot's sequence and clientid lease timer */
  749. slot->seq_done = 1;
  750. do_renew_lease(clp, res->sr_timestamp);
  751. /* Check sequence flags */
  752. nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
  753. !!slot->privileged);
  754. nfs41_update_target_slotid(slot->table, slot, res);
  755. break;
  756. case 1:
  757. /*
  758. * sr_status remains 1 if an RPC level error occurred.
  759. * The server may or may not have processed the sequence
760. * operation.
  761. */
  762. nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
  763. slot->seq_done = 1;
  764. goto out;
  765. case -NFS4ERR_DELAY:
  766. /* The server detected a resend of the RPC call and
  767. * returned NFS4ERR_DELAY as per Section 2.10.6.2
  768. * of RFC5661.
  769. */
  770. dprintk("%s: slot=%u seq=%u: Operation in progress\n",
  771. __func__,
  772. slot->slot_nr,
  773. slot->seq_nr);
  774. goto out_retry;
  775. case -NFS4ERR_RETRY_UNCACHED_REP:
  776. case -NFS4ERR_SEQ_FALSE_RETRY:
  777. /*
  778. * The server thinks we tried to replay a request.
  779. * Retry the call after bumping the sequence ID.
  780. */
  781. nfs4_slot_sequence_acked(slot, slot->seq_nr);
  782. goto retry_new_seq;
  783. case -NFS4ERR_BADSLOT:
  784. /*
  785. * The slot id we used was probably retired. Try again
  786. * using a different slot id.
  787. */
  788. if (slot->slot_nr < slot->table->target_highest_slotid)
  789. goto session_recover;
  790. goto retry_nowait;
  791. case -NFS4ERR_SEQ_MISORDERED:
  792. nfs4_slot_sequence_record_sent(slot, slot->seq_nr);
  793. /*
  794. * Were one or more calls using this slot interrupted?
  795. * If the server never received the request, then our
  796. * transmitted slot sequence number may be too high. However,
  797. * if the server did receive the request then it might
  798. * accidentally give us a reply with a mismatched operation.
  799. * We can sort this out by sending a lone sequence operation
  800. * to the server on the same slot.
  801. */
  802. if ((s32)(slot->seq_nr - slot->seq_nr_last_acked) > 1) {
  803. slot->seq_nr--;
  804. if (task->tk_msg.rpc_proc != &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE]) {
  805. nfs4_probe_sequence(clp, task->tk_msg.rpc_cred, slot);
  806. res->sr_slot = NULL;
  807. }
  808. goto retry_nowait;
  809. }
  810. /*
  811. * RFC5661:
  812. * A retry might be sent while the original request is
  813. * still in progress on the replier. The replier SHOULD
  814. * deal with the issue by returning NFS4ERR_DELAY as the
  815. * reply to SEQUENCE or CB_SEQUENCE operation, but
  816. * implementations MAY return NFS4ERR_SEQ_MISORDERED.
  817. *
  818. * Restart the search after a delay.
  819. */
  820. slot->seq_nr = slot->seq_nr_highest_sent;
  821. goto out_retry;
  822. case -NFS4ERR_BADSESSION:
  823. case -NFS4ERR_DEADSESSION:
  824. case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
  825. goto session_recover;
  826. default:
  827. /* Just update the slot sequence no. */
  828. slot->seq_done = 1;
  829. }
  830. out:
  831. /* The session may be reset by one of the error handlers. */
832. dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
  833. out_noaction:
  834. return ret;
  835. session_recover:
  836. set_bit(NFS4_SLOT_TBL_DRAINING, &session->fc_slot_table.slot_tbl_state);
  837. nfs4_schedule_session_recovery(session, status);
  838. dprintk("%s ERROR: %d Reset session\n", __func__, status);
  839. nfs41_sequence_free_slot(res);
  840. goto out;
  841. retry_new_seq:
  842. ++slot->seq_nr;
  843. retry_nowait:
  844. if (rpc_restart_call_prepare(task)) {
  845. nfs41_sequence_free_slot(res);
  846. task->tk_status = 0;
  847. ret = 0;
  848. }
  849. goto out;
  850. out_retry:
  851. if (!rpc_restart_call(task))
  852. goto out;
  853. rpc_delay(task, NFS4_POLL_RETRY_MAX);
  854. return 0;
  855. }
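/*
 * Summary (added, not part of the original source): the SEQUENCE status
 * decides the fate of the slot. Success acks the sequence number, renews
 * the lease and updates the target slotid; NFS4ERR_DELAY retries on the
 * same slot after a delay; FALSE_RETRY and RETRY_UNCACHED_REP bump the
 * sequence number and retry; BADSLOT either restarts the call or, if the
 * slot should still have been valid, forces session recovery; and
 * SEQ_MISORDERED may first send a lone SEQUENCE probe on the slot to
 * resynchronise before retrying.
 */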
  856. int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
  857. {
  858. if (!nfs41_sequence_process(task, res))
  859. return 0;
  860. if (res->sr_slot != NULL)
  861. nfs41_sequence_free_slot(res);
  862. return 1;
  863. }
  864. EXPORT_SYMBOL_GPL(nfs41_sequence_done);
  865. static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
  866. {
  867. if (res->sr_slot == NULL)
  868. return 1;
  869. if (res->sr_slot->table->session != NULL)
  870. return nfs41_sequence_process(task, res);
  871. return nfs40_sequence_done(task, res);
  872. }
  873. static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
  874. {
  875. if (res->sr_slot != NULL) {
  876. if (res->sr_slot->table->session != NULL)
  877. nfs41_sequence_free_slot(res);
  878. else
  879. nfs40_sequence_free_slot(res);
  880. }
  881. }
  882. int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
  883. {
  884. if (res->sr_slot == NULL)
  885. return 1;
  886. if (!res->sr_slot->table->session)
  887. return nfs40_sequence_done(task, res);
  888. return nfs41_sequence_done(task, res);
  889. }
  890. EXPORT_SYMBOL_GPL(nfs4_sequence_done);
  891. static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
  892. {
  893. struct nfs4_call_sync_data *data = calldata;
  894. dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
  895. nfs4_setup_sequence(data->seq_server->nfs_client,
  896. data->seq_args, data->seq_res, task);
  897. }
  898. static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
  899. {
  900. struct nfs4_call_sync_data *data = calldata;
  901. nfs41_sequence_done(task, data->seq_res);
  902. }
  903. static const struct rpc_call_ops nfs41_call_sync_ops = {
  904. .rpc_call_prepare = nfs41_call_sync_prepare,
  905. .rpc_call_done = nfs41_call_sync_done,
  906. };
  907. #else /* !CONFIG_NFS_V4_1 */
  908. static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
  909. {
  910. return nfs40_sequence_done(task, res);
  911. }
  912. static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
  913. {
  914. if (res->sr_slot != NULL)
  915. nfs40_sequence_free_slot(res);
  916. }
  917. int nfs4_sequence_done(struct rpc_task *task,
  918. struct nfs4_sequence_res *res)
  919. {
  920. return nfs40_sequence_done(task, res);
  921. }
  922. EXPORT_SYMBOL_GPL(nfs4_sequence_done);
  923. #endif /* !CONFIG_NFS_V4_1 */
  924. static void nfs41_sequence_res_init(struct nfs4_sequence_res *res)
  925. {
  926. res->sr_timestamp = jiffies;
  927. res->sr_status_flags = 0;
  928. res->sr_status = 1;
  929. }
  930. static
  931. void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
  932. struct nfs4_sequence_res *res,
  933. struct nfs4_slot *slot)
  934. {
  935. if (!slot)
  936. return;
  937. slot->privileged = args->sa_privileged ? 1 : 0;
  938. args->sa_slot = slot;
  939. res->sr_slot = slot;
  940. }
  941. int nfs4_setup_sequence(struct nfs_client *client,
  942. struct nfs4_sequence_args *args,
  943. struct nfs4_sequence_res *res,
  944. struct rpc_task *task)
  945. {
  946. struct nfs4_session *session = nfs4_get_session(client);
  947. struct nfs4_slot_table *tbl = client->cl_slot_tbl;
  948. struct nfs4_slot *slot;
  949. /* slot already allocated? */
  950. if (res->sr_slot != NULL)
  951. goto out_start;
  952. if (session)
  953. tbl = &session->fc_slot_table;
  954. spin_lock(&tbl->slot_tbl_lock);
  955. /* The state manager will wait until the slot table is empty */
  956. if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
  957. goto out_sleep;
  958. slot = nfs4_alloc_slot(tbl);
  959. if (IS_ERR(slot)) {
  960. if (slot == ERR_PTR(-ENOMEM))
  961. goto out_sleep_timeout;
  962. goto out_sleep;
  963. }
  964. spin_unlock(&tbl->slot_tbl_lock);
  965. nfs4_sequence_attach_slot(args, res, slot);
  966. trace_nfs4_setup_sequence(session, args);
  967. out_start:
  968. nfs41_sequence_res_init(res);
  969. rpc_call_start(task);
  970. return 0;
  971. out_sleep_timeout:
  972. /* Try again in 1/4 second */
  973. if (args->sa_privileged)
  974. rpc_sleep_on_priority_timeout(&tbl->slot_tbl_waitq, task,
  975. jiffies + (HZ >> 2), RPC_PRIORITY_PRIVILEGED);
  976. else
  977. rpc_sleep_on_timeout(&tbl->slot_tbl_waitq, task,
  978. NULL, jiffies + (HZ >> 2));
  979. spin_unlock(&tbl->slot_tbl_lock);
  980. return -EAGAIN;
  981. out_sleep:
  982. if (args->sa_privileged)
  983. rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
  984. RPC_PRIORITY_PRIVILEGED);
  985. else
  986. rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
  987. spin_unlock(&tbl->slot_tbl_lock);
  988. return -EAGAIN;
  989. }
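/*
 * Note (added): slot allocation policy in brief - while the slot table is
 * draining, non-privileged requests sleep on the table waitqueue until
 * the state manager has finished; a transient ENOMEM from
 * nfs4_alloc_slot() retries automatically after roughly a quarter of a
 * second; privileged requests queue at RPC_PRIORITY_PRIVILEGED so that
 * recovery operations are not starved.
 */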
  990. EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
  991. static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
  992. {
  993. struct nfs4_call_sync_data *data = calldata;
  994. nfs4_setup_sequence(data->seq_server->nfs_client,
  995. data->seq_args, data->seq_res, task);
  996. }
  997. static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
  998. {
  999. struct nfs4_call_sync_data *data = calldata;
  1000. nfs4_sequence_done(task, data->seq_res);
  1001. }
  1002. static const struct rpc_call_ops nfs40_call_sync_ops = {
  1003. .rpc_call_prepare = nfs40_call_sync_prepare,
  1004. .rpc_call_done = nfs40_call_sync_done,
  1005. };
  1006. static int nfs4_call_sync_custom(struct rpc_task_setup *task_setup)
  1007. {
  1008. int ret;
  1009. struct rpc_task *task;
  1010. task = rpc_run_task(task_setup);
  1011. if (IS_ERR(task))
  1012. return PTR_ERR(task);
  1013. ret = task->tk_status;
  1014. rpc_put_task(task);
  1015. return ret;
  1016. }
  1017. static int nfs4_do_call_sync(struct rpc_clnt *clnt,
  1018. struct nfs_server *server,
  1019. struct rpc_message *msg,
  1020. struct nfs4_sequence_args *args,
  1021. struct nfs4_sequence_res *res,
  1022. unsigned short task_flags)
  1023. {
  1024. struct nfs_client *clp = server->nfs_client;
  1025. struct nfs4_call_sync_data data = {
  1026. .seq_server = server,
  1027. .seq_args = args,
  1028. .seq_res = res,
  1029. };
  1030. struct rpc_task_setup task_setup = {
  1031. .rpc_client = clnt,
  1032. .rpc_message = msg,
  1033. .callback_ops = clp->cl_mvops->call_sync_ops,
  1034. .callback_data = &data,
  1035. .flags = task_flags,
  1036. };
  1037. return nfs4_call_sync_custom(&task_setup);
  1038. }
  1039. static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
  1040. struct nfs_server *server,
  1041. struct rpc_message *msg,
  1042. struct nfs4_sequence_args *args,
  1043. struct nfs4_sequence_res *res)
  1044. {
  1045. unsigned short task_flags = 0;
  1046. if (server->caps & NFS_CAP_MOVEABLE)
  1047. task_flags = RPC_TASK_MOVEABLE;
  1048. return nfs4_do_call_sync(clnt, server, msg, args, res, task_flags);
  1049. }
  1050. int nfs4_call_sync(struct rpc_clnt *clnt,
  1051. struct nfs_server *server,
  1052. struct rpc_message *msg,
  1053. struct nfs4_sequence_args *args,
  1054. struct nfs4_sequence_res *res,
  1055. int cache_reply)
  1056. {
  1057. nfs4_init_sequence(args, res, cache_reply, 0);
  1058. return nfs4_call_sync_sequence(clnt, server, msg, args, res);
  1059. }
  1060. static void
  1061. nfs4_inc_nlink_locked(struct inode *inode)
  1062. {
  1063. nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
  1064. NFS_INO_INVALID_CTIME |
  1065. NFS_INO_INVALID_NLINK);
  1066. inc_nlink(inode);
  1067. }
  1068. static void
  1069. nfs4_inc_nlink(struct inode *inode)
  1070. {
  1071. spin_lock(&inode->i_lock);
  1072. nfs4_inc_nlink_locked(inode);
  1073. spin_unlock(&inode->i_lock);
  1074. }
  1075. static void
  1076. nfs4_dec_nlink_locked(struct inode *inode)
  1077. {
  1078. nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
  1079. NFS_INO_INVALID_CTIME |
  1080. NFS_INO_INVALID_NLINK);
  1081. drop_nlink(inode);
  1082. }
  1083. static void
  1084. nfs4_update_changeattr_locked(struct inode *inode,
  1085. struct nfs4_change_info *cinfo,
  1086. unsigned long timestamp, unsigned long cache_validity)
  1087. {
  1088. struct nfs_inode *nfsi = NFS_I(inode);
  1089. u64 change_attr = inode_peek_iversion_raw(inode);
  1090. cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;
  1091. if (S_ISDIR(inode->i_mode))
  1092. cache_validity |= NFS_INO_INVALID_DATA;
  1093. switch (NFS_SERVER(inode)->change_attr_type) {
  1094. case NFS4_CHANGE_TYPE_IS_UNDEFINED:
  1095. if (cinfo->after == change_attr)
  1096. goto out;
  1097. break;
  1098. default:
  1099. if ((s64)(change_attr - cinfo->after) >= 0)
  1100. goto out;
  1101. }
  1102. inode_set_iversion_raw(inode, cinfo->after);
  1103. if (!cinfo->atomic || cinfo->before != change_attr) {
  1104. if (S_ISDIR(inode->i_mode))
  1105. nfs_force_lookup_revalidate(inode);
  1106. if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
  1107. cache_validity |=
  1108. NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
  1109. NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
  1110. NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
  1111. NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
  1112. nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
  1113. }
  1114. nfsi->attrtimeo_timestamp = jiffies;
  1115. nfsi->read_cache_jiffies = timestamp;
  1116. nfsi->attr_gencount = nfs_inc_attr_generation_counter();
  1117. nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;
  1118. out:
  1119. nfs_set_cache_invalid(inode, cache_validity);
  1120. }
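/*
 * Note (added for clarity): when the server's change_attr_type is
 * undefined we can only test the new change attribute for equality with
 * cinfo->after; for the monotonic types the signed 64-bit comparison
 * above also treats an already-newer local value as up to date. If the
 * directory operation was not atomic, or cinfo->before does not match the
 * cached change attribute, the remaining attributes (and directory data)
 * are marked invalid so they get revalidated.
 */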
  1121. void
  1122. nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
  1123. unsigned long timestamp, unsigned long cache_validity)
  1124. {
  1125. spin_lock(&dir->i_lock);
  1126. nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity);
  1127. spin_unlock(&dir->i_lock);
  1128. }
  1129. struct nfs4_open_createattrs {
  1130. struct nfs4_label *label;
  1131. struct iattr *sattr;
  1132. const __u32 verf[2];
  1133. };
  1134. static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
  1135. int err, struct nfs4_exception *exception)
  1136. {
  1137. if (err != -EINVAL)
  1138. return false;
  1139. if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
  1140. return false;
  1141. server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
  1142. exception->retry = 1;
  1143. return true;
  1144. }
  1145. static fmode_t _nfs4_ctx_to_accessmode(const struct nfs_open_context *ctx)
  1146. {
  1147. return ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
  1148. }
  1149. static fmode_t _nfs4_ctx_to_openmode(const struct nfs_open_context *ctx)
  1150. {
  1151. fmode_t ret = ctx->mode & (FMODE_READ|FMODE_WRITE);
  1152. return (ctx->mode & FMODE_EXEC) ? FMODE_READ | ret : ret;
  1153. }
  1154. static u32
  1155. nfs4_map_atomic_open_share(struct nfs_server *server,
  1156. fmode_t fmode, int openflags)
  1157. {
  1158. u32 res = 0;
  1159. switch (fmode & (FMODE_READ | FMODE_WRITE)) {
  1160. case FMODE_READ:
  1161. res = NFS4_SHARE_ACCESS_READ;
  1162. break;
  1163. case FMODE_WRITE:
  1164. res = NFS4_SHARE_ACCESS_WRITE;
  1165. break;
  1166. case FMODE_READ|FMODE_WRITE:
  1167. res = NFS4_SHARE_ACCESS_BOTH;
  1168. }
  1169. if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
  1170. goto out;
  1171. /* Want no delegation if we're using O_DIRECT */
  1172. if (openflags & O_DIRECT)
  1173. res |= NFS4_SHARE_WANT_NO_DELEG;
  1174. out:
  1175. return res;
  1176. }
  1177. static enum open_claim_type4
  1178. nfs4_map_atomic_open_claim(struct nfs_server *server,
  1179. enum open_claim_type4 claim)
  1180. {
  1181. if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
  1182. return claim;
  1183. switch (claim) {
  1184. default:
  1185. return claim;
  1186. case NFS4_OPEN_CLAIM_FH:
  1187. return NFS4_OPEN_CLAIM_NULL;
  1188. case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
  1189. return NFS4_OPEN_CLAIM_DELEGATE_CUR;
  1190. case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
  1191. return NFS4_OPEN_CLAIM_DELEGATE_PREV;
  1192. }
  1193. }
  1194. static void nfs4_init_opendata_res(struct nfs4_opendata *p)
  1195. {
  1196. p->o_res.f_attr = &p->f_attr;
  1197. p->o_res.seqid = p->o_arg.seqid;
  1198. p->c_res.seqid = p->c_arg.seqid;
  1199. p->o_res.server = p->o_arg.server;
  1200. p->o_res.access_request = p->o_arg.access;
  1201. nfs_fattr_init(&p->f_attr);
  1202. nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
  1203. }
  1204. static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
  1205. struct nfs4_state_owner *sp, fmode_t fmode, int flags,
  1206. const struct nfs4_open_createattrs *c,
  1207. enum open_claim_type4 claim,
  1208. gfp_t gfp_mask)
  1209. {
  1210. struct dentry *parent = dget_parent(dentry);
  1211. struct inode *dir = d_inode(parent);
  1212. struct nfs_server *server = NFS_SERVER(dir);
  1213. struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
  1214. struct nfs4_label *label = (c != NULL) ? c->label : NULL;
  1215. struct nfs4_opendata *p;
  1216. p = kzalloc(sizeof(*p), gfp_mask);
  1217. if (p == NULL)
  1218. goto err;
  1219. p->f_attr.label = nfs4_label_alloc(server, gfp_mask);
  1220. if (IS_ERR(p->f_attr.label))
  1221. goto err_free_p;
  1222. p->a_label = nfs4_label_alloc(server, gfp_mask);
  1223. if (IS_ERR(p->a_label))
  1224. goto err_free_f;
  1225. alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
  1226. p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
  1227. if (IS_ERR(p->o_arg.seqid))
  1228. goto err_free_label;
  1229. nfs_sb_active(dentry->d_sb);
  1230. p->dentry = dget(dentry);
  1231. p->dir = parent;
  1232. p->owner = sp;
  1233. atomic_inc(&sp->so_count);
  1234. p->o_arg.open_flags = flags;
  1235. p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
  1236. p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
  1237. p->o_arg.share_access = nfs4_map_atomic_open_share(server,
  1238. fmode, flags);
  1239. if (flags & O_CREAT) {
  1240. p->o_arg.umask = current_umask();
  1241. p->o_arg.label = nfs4_label_copy(p->a_label, label);
  1242. if (c->sattr != NULL && c->sattr->ia_valid != 0) {
  1243. p->o_arg.u.attrs = &p->attrs;
  1244. memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
  1245. memcpy(p->o_arg.u.verifier.data, c->verf,
  1246. sizeof(p->o_arg.u.verifier.data));
  1247. }
  1248. }
  1249. /* ask server to check for all possible rights as results
  1250. * are cached */
  1251. switch (p->o_arg.claim) {
  1252. default:
  1253. break;
  1254. case NFS4_OPEN_CLAIM_NULL:
  1255. case NFS4_OPEN_CLAIM_FH:
  1256. p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
  1257. NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE |
  1258. NFS4_ACCESS_EXECUTE |
  1259. nfs_access_xattr_mask(server);
  1260. }
  1261. p->o_arg.clientid = server->nfs_client->cl_clientid;
  1262. p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
  1263. p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
  1264. p->o_arg.name = &dentry->d_name;
  1265. p->o_arg.server = server;
  1266. p->o_arg.bitmask = nfs4_bitmask(server, label);
  1267. p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
  1268. switch (p->o_arg.claim) {
  1269. case NFS4_OPEN_CLAIM_NULL:
  1270. case NFS4_OPEN_CLAIM_DELEGATE_CUR:
  1271. case NFS4_OPEN_CLAIM_DELEGATE_PREV:
  1272. p->o_arg.fh = NFS_FH(dir);
  1273. break;
  1274. case NFS4_OPEN_CLAIM_PREVIOUS:
  1275. case NFS4_OPEN_CLAIM_FH:
  1276. case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
  1277. case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
  1278. p->o_arg.fh = NFS_FH(d_inode(dentry));
  1279. }
  1280. p->c_arg.fh = &p->o_res.fh;
  1281. p->c_arg.stateid = &p->o_res.stateid;
  1282. p->c_arg.seqid = p->o_arg.seqid;
  1283. nfs4_init_opendata_res(p);
  1284. kref_init(&p->kref);
  1285. return p;
  1286. err_free_label:
  1287. nfs4_label_free(p->a_label);
  1288. err_free_f:
  1289. nfs4_label_free(p->f_attr.label);
  1290. err_free_p:
  1291. kfree(p);
  1292. err:
  1293. dput(parent);
  1294. return NULL;
  1295. }
  1296. static void nfs4_opendata_free(struct kref *kref)
  1297. {
  1298. struct nfs4_opendata *p = container_of(kref,
  1299. struct nfs4_opendata, kref);
  1300. struct super_block *sb = p->dentry->d_sb;
  1301. nfs4_lgopen_release(p->lgp);
  1302. nfs_free_seqid(p->o_arg.seqid);
  1303. nfs4_sequence_free_slot(&p->o_res.seq_res);
  1304. if (p->state != NULL)
  1305. nfs4_put_open_state(p->state);
  1306. nfs4_put_state_owner(p->owner);
  1307. nfs4_label_free(p->a_label);
  1308. nfs4_label_free(p->f_attr.label);
  1309. dput(p->dir);
  1310. dput(p->dentry);
  1311. nfs_sb_deactive(sb);
  1312. nfs_fattr_free_names(&p->f_attr);
  1313. kfree(p->f_attr.mdsthreshold);
  1314. kfree(p);
  1315. }
  1316. static void nfs4_opendata_put(struct nfs4_opendata *p)
  1317. {
  1318. if (p != NULL)
  1319. kref_put(&p->kref, nfs4_opendata_free);
  1320. }
  1321. static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
  1322. fmode_t fmode)
  1323. {
  1324. switch(fmode & (FMODE_READ|FMODE_WRITE)) {
  1325. case FMODE_READ|FMODE_WRITE:
  1326. return state->n_rdwr != 0;
  1327. case FMODE_WRITE:
  1328. return state->n_wronly != 0;
  1329. case FMODE_READ:
  1330. return state->n_rdonly != 0;
  1331. }
  1332. WARN_ON_ONCE(1);
  1333. return false;
  1334. }
  1335. static int can_open_cached(struct nfs4_state *state, fmode_t mode,
  1336. int open_mode, enum open_claim_type4 claim)
  1337. {
  1338. int ret = 0;
  1339. if (open_mode & (O_EXCL|O_TRUNC))
  1340. goto out;
  1341. switch (claim) {
  1342. case NFS4_OPEN_CLAIM_NULL:
  1343. case NFS4_OPEN_CLAIM_FH:
  1344. goto out;
  1345. default:
  1346. break;
  1347. }
  1348. switch (mode & (FMODE_READ|FMODE_WRITE)) {
  1349. case FMODE_READ:
  1350. ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
  1351. && state->n_rdonly != 0;
  1352. break;
  1353. case FMODE_WRITE:
  1354. ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
  1355. && state->n_wronly != 0;
  1356. break;
  1357. case FMODE_READ|FMODE_WRITE:
  1358. ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
  1359. && state->n_rdwr != 0;
  1360. }
  1361. out:
  1362. return ret;
  1363. }
  1364. static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
  1365. enum open_claim_type4 claim)
  1366. {
  1367. if (delegation == NULL)
  1368. return 0;
  1369. if ((delegation->type & fmode) != fmode)
  1370. return 0;
  1371. switch (claim) {
  1372. case NFS4_OPEN_CLAIM_NULL:
  1373. case NFS4_OPEN_CLAIM_FH:
  1374. break;
  1375. case NFS4_OPEN_CLAIM_PREVIOUS:
  1376. if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
  1377. break;
  1378. fallthrough;
  1379. default:
  1380. return 0;
  1381. }
  1382. nfs_mark_delegation_referenced(delegation);
  1383. return 1;
  1384. }
  1385. static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
  1386. {
  1387. switch (fmode) {
  1388. case FMODE_WRITE:
  1389. state->n_wronly++;
  1390. break;
  1391. case FMODE_READ:
  1392. state->n_rdonly++;
  1393. break;
  1394. case FMODE_READ|FMODE_WRITE:
  1395. state->n_rdwr++;
  1396. }
  1397. nfs4_state_set_mode_locked(state, state->state | fmode);
  1398. }
  1399. #ifdef CONFIG_NFS_V4_1
  1400. static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
  1401. {
  1402. if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
  1403. return true;
  1404. if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
  1405. return true;
  1406. if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
  1407. return true;
  1408. return false;
  1409. }
  1410. #endif /* CONFIG_NFS_V4_1 */
  1411. static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
  1412. {
  1413. if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
  1414. wake_up_all(&state->waitq);
  1415. }
  1416. static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
  1417. {
  1418. struct nfs_client *clp = state->owner->so_server->nfs_client;
  1419. bool need_recover = false;
  1420. if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
  1421. need_recover = true;
  1422. if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
  1423. need_recover = true;
  1424. if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
  1425. need_recover = true;
  1426. if (need_recover)
  1427. nfs4_state_mark_reclaim_nograce(clp, state);
  1428. }
  1429. /*
1430. * Check whether the caller may update the open stateid
  1431. * to the value passed in by stateid.
  1432. *
  1433. * Note: This function relies heavily on the server implementing
  1434. * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
  1435. * correctly.
  1436. * i.e. The stateid seqids have to be initialised to 1, and
  1437. * are then incremented on every state transition.
  1438. */
  1439. static bool nfs_stateid_is_sequential(struct nfs4_state *state,
  1440. const nfs4_stateid *stateid)
  1441. {
  1442. if (test_bit(NFS_OPEN_STATE, &state->flags)) {
  1443. /* The common case - we're updating to a new sequence number */
  1444. if (nfs4_stateid_match_other(stateid, &state->open_stateid)) {
  1445. if (nfs4_stateid_is_next(&state->open_stateid, stateid))
  1446. return true;
  1447. return false;
  1448. }
  1449. /* The server returned a new stateid */
  1450. }
  1451. /* This is the first OPEN in this generation */
  1452. if (stateid->seqid == cpu_to_be32(1))
  1453. return true;
  1454. return false;
  1455. }
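/*
 * Worked example (added, not part of the original source): if the cached
 * open stateid has seqid 3, an update carrying the same "other" field is
 * accepted only with seqid 4, the next in sequence. A stateid with a
 * different "other" field is accepted only if its seqid is 1, i.e. the
 * first OPEN of a new stateid generation; anything else has to wait its
 * turn in nfs_set_open_stateid_locked().
 */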
  1456. static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
  1457. {
  1458. if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
  1459. return;
  1460. if (state->n_wronly)
  1461. set_bit(NFS_O_WRONLY_STATE, &state->flags);
  1462. if (state->n_rdonly)
  1463. set_bit(NFS_O_RDONLY_STATE, &state->flags);
  1464. if (state->n_rdwr)
  1465. set_bit(NFS_O_RDWR_STATE, &state->flags);
  1466. set_bit(NFS_OPEN_STATE, &state->flags);
  1467. }
  1468. static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
  1469. nfs4_stateid *stateid, fmode_t fmode)
  1470. {
  1471. clear_bit(NFS_O_RDWR_STATE, &state->flags);
  1472. switch (fmode & (FMODE_READ|FMODE_WRITE)) {
  1473. case FMODE_WRITE:
  1474. clear_bit(NFS_O_RDONLY_STATE, &state->flags);
  1475. break;
  1476. case FMODE_READ:
  1477. clear_bit(NFS_O_WRONLY_STATE, &state->flags);
  1478. break;
  1479. case 0:
  1480. clear_bit(NFS_O_RDONLY_STATE, &state->flags);
  1481. clear_bit(NFS_O_WRONLY_STATE, &state->flags);
  1482. clear_bit(NFS_OPEN_STATE, &state->flags);
  1483. }
  1484. if (stateid == NULL)
  1485. return;
  1486. /* Handle OPEN+OPEN_DOWNGRADE races */
  1487. if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
  1488. !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
  1489. nfs_resync_open_stateid_locked(state);
  1490. goto out;
  1491. }
  1492. if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
  1493. nfs4_stateid_copy(&state->stateid, stateid);
  1494. nfs4_stateid_copy(&state->open_stateid, stateid);
  1495. trace_nfs4_open_stateid_update(state->inode, stateid, 0);
  1496. out:
  1497. nfs_state_log_update_open_stateid(state);
  1498. }
  1499. static void nfs_clear_open_stateid(struct nfs4_state *state,
  1500. nfs4_stateid *arg_stateid,
  1501. nfs4_stateid *stateid, fmode_t fmode)
  1502. {
  1503. write_seqlock(&state->seqlock);
1504. /* Ignore if the CLOSE argument doesn't match the current stateid */
  1505. if (nfs4_state_match_open_stateid_other(state, arg_stateid))
  1506. nfs_clear_open_stateid_locked(state, stateid, fmode);
  1507. write_sequnlock(&state->seqlock);
  1508. if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
  1509. nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
  1510. }
  1511. static void nfs_set_open_stateid_locked(struct nfs4_state *state,
  1512. const nfs4_stateid *stateid, nfs4_stateid *freeme)
  1513. __must_hold(&state->owner->so_lock)
  1514. __must_hold(&state->seqlock)
  1515. __must_hold(RCU)
  1516. {
  1517. DEFINE_WAIT(wait);
  1518. int status = 0;
  1519. for (;;) {
  1520. if (nfs_stateid_is_sequential(state, stateid))
  1521. break;
  1522. if (status)
  1523. break;
  1524. /* Rely on seqids for serialisation with NFSv4.0 */
  1525. if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
  1526. break;
  1527. set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
  1528. prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
  1529. /*
  1530. * Ensure we process the state changes in the same order
  1531. * in which the server processed them by delaying the
  1532. * update of the stateid until we are in sequence.
  1533. */
  1534. write_sequnlock(&state->seqlock);
  1535. spin_unlock(&state->owner->so_lock);
  1536. rcu_read_unlock();
  1537. trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
  1538. if (!fatal_signal_pending(current)) {
  1539. if (schedule_timeout(5*HZ) == 0)
  1540. status = -EAGAIN;
  1541. else
  1542. status = 0;
  1543. } else
  1544. status = -EINTR;
  1545. finish_wait(&state->waitq, &wait);
  1546. rcu_read_lock();
  1547. spin_lock(&state->owner->so_lock);
  1548. write_seqlock(&state->seqlock);
  1549. }
  1550. if (test_bit(NFS_OPEN_STATE, &state->flags) &&
  1551. !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
  1552. nfs4_stateid_copy(freeme, &state->open_stateid);
  1553. nfs_test_and_clear_all_open_stateid(state);
  1554. }
  1555. if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
  1556. nfs4_stateid_copy(&state->stateid, stateid);
  1557. nfs4_stateid_copy(&state->open_stateid, stateid);
  1558. trace_nfs4_open_stateid_update(state->inode, stateid, status);
  1559. nfs_state_log_update_open_stateid(state);
  1560. }
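/*
 * Note (added): the wait loop above is what enforces the ordering
 * described for nfs_stateid_is_sequential(). An out-of-order OPEN reply
 * drops the seqlock, the owner's so_lock and the RCU read lock, sleeps on
 * state->waitq for up to 5 seconds (or until a fatal signal), and then
 * re-checks; after a timeout or signal the update is applied anyway, so a
 * lost reply cannot wedge the client forever.
 */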
  1561. static void nfs_state_set_open_stateid(struct nfs4_state *state,
  1562. const nfs4_stateid *open_stateid,
  1563. fmode_t fmode,
  1564. nfs4_stateid *freeme)
  1565. {
  1566. /*
  1567. * Protect the call to nfs4_state_set_mode_locked and
  1568. * serialise the stateid update
  1569. */
  1570. write_seqlock(&state->seqlock);
  1571. nfs_set_open_stateid_locked(state, open_stateid, freeme);
  1572. switch (fmode) {
  1573. case FMODE_READ:
  1574. set_bit(NFS_O_RDONLY_STATE, &state->flags);
  1575. break;
  1576. case FMODE_WRITE:
  1577. set_bit(NFS_O_WRONLY_STATE, &state->flags);
  1578. break;
  1579. case FMODE_READ|FMODE_WRITE:
  1580. set_bit(NFS_O_RDWR_STATE, &state->flags);
  1581. }
  1582. set_bit(NFS_OPEN_STATE, &state->flags);
  1583. write_sequnlock(&state->seqlock);
  1584. }
  1585. static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
  1586. {
  1587. clear_bit(NFS_O_RDWR_STATE, &state->flags);
  1588. clear_bit(NFS_O_WRONLY_STATE, &state->flags);
  1589. clear_bit(NFS_O_RDONLY_STATE, &state->flags);
  1590. clear_bit(NFS_OPEN_STATE, &state->flags);
  1591. }
  1592. static void nfs_state_set_delegation(struct nfs4_state *state,
  1593. const nfs4_stateid *deleg_stateid,
  1594. fmode_t fmode)
  1595. {
  1596. /*
  1597. * Protect the call to nfs4_state_set_mode_locked and
  1598. * serialise the stateid update
  1599. */
  1600. write_seqlock(&state->seqlock);
  1601. nfs4_stateid_copy(&state->stateid, deleg_stateid);
  1602. set_bit(NFS_DELEGATED_STATE, &state->flags);
  1603. write_sequnlock(&state->seqlock);
  1604. }
  1605. static void nfs_state_clear_delegation(struct nfs4_state *state)
  1606. {
  1607. write_seqlock(&state->seqlock);
  1608. nfs4_stateid_copy(&state->stateid, &state->open_stateid);
  1609. clear_bit(NFS_DELEGATED_STATE, &state->flags);
  1610. write_sequnlock(&state->seqlock);
  1611. }
  1612. int update_open_stateid(struct nfs4_state *state,
  1613. const nfs4_stateid *open_stateid,
  1614. const nfs4_stateid *delegation,
  1615. fmode_t fmode)
  1616. {
  1617. struct nfs_server *server = NFS_SERVER(state->inode);
  1618. struct nfs_client *clp = server->nfs_client;
  1619. struct nfs_inode *nfsi = NFS_I(state->inode);
  1620. struct nfs_delegation *deleg_cur;
  1621. nfs4_stateid freeme = { };
  1622. int ret = 0;
  1623. fmode &= (FMODE_READ|FMODE_WRITE);
  1624. rcu_read_lock();
  1625. spin_lock(&state->owner->so_lock);
  1626. if (open_stateid != NULL) {
  1627. nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
  1628. ret = 1;
  1629. }
  1630. deleg_cur = nfs4_get_valid_delegation(state->inode);
  1631. if (deleg_cur == NULL)
  1632. goto no_delegation;
  1633. spin_lock(&deleg_cur->lock);
  1634. if (rcu_dereference(nfsi->delegation) != deleg_cur ||
  1635. test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
  1636. (deleg_cur->type & fmode) != fmode)
  1637. goto no_delegation_unlock;
  1638. if (delegation == NULL)
  1639. delegation = &deleg_cur->stateid;
  1640. else if (!nfs4_stateid_match_other(&deleg_cur->stateid, delegation))
  1641. goto no_delegation_unlock;
  1642. nfs_mark_delegation_referenced(deleg_cur);
  1643. nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
  1644. ret = 1;
  1645. no_delegation_unlock:
  1646. spin_unlock(&deleg_cur->lock);
  1647. no_delegation:
  1648. if (ret)
  1649. update_open_stateflags(state, fmode);
  1650. spin_unlock(&state->owner->so_lock);
  1651. rcu_read_unlock();
  1652. if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
  1653. nfs4_schedule_state_manager(clp);
  1654. if (freeme.type != 0)
  1655. nfs4_test_and_free_stateid(server, &freeme,
  1656. state->owner->so_cred);
  1657. return ret;
  1658. }
  1659. static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
  1660. const nfs4_stateid *stateid)
  1661. {
  1662. struct nfs4_state *state = lsp->ls_state;
  1663. bool ret = false;
  1664. spin_lock(&state->state_lock);
  1665. if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
  1666. goto out_noupdate;
  1667. if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
  1668. goto out_noupdate;
  1669. nfs4_stateid_copy(&lsp->ls_stateid, stateid);
  1670. ret = true;
  1671. out_noupdate:
  1672. spin_unlock(&state->state_lock);
  1673. return ret;
  1674. }
  1675. static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
  1676. {
  1677. struct nfs_delegation *delegation;
  1678. fmode &= FMODE_READ|FMODE_WRITE;
  1679. rcu_read_lock();
  1680. delegation = nfs4_get_valid_delegation(inode);
  1681. if (delegation == NULL || (delegation->type & fmode) == fmode) {
  1682. rcu_read_unlock();
  1683. return;
  1684. }
  1685. rcu_read_unlock();
  1686. nfs4_inode_return_delegation(inode);
  1687. }
  1688. static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
  1689. {
  1690. struct nfs4_state *state = opendata->state;
  1691. struct nfs_delegation *delegation;
  1692. int open_mode = opendata->o_arg.open_flags;
  1693. fmode_t fmode = opendata->o_arg.fmode;
  1694. enum open_claim_type4 claim = opendata->o_arg.claim;
  1695. nfs4_stateid stateid;
  1696. int ret = -EAGAIN;
  1697. for (;;) {
  1698. spin_lock(&state->owner->so_lock);
  1699. if (can_open_cached(state, fmode, open_mode, claim)) {
  1700. update_open_stateflags(state, fmode);
  1701. spin_unlock(&state->owner->so_lock);
  1702. goto out_return_state;
  1703. }
  1704. spin_unlock(&state->owner->so_lock);
  1705. rcu_read_lock();
  1706. delegation = nfs4_get_valid_delegation(state->inode);
  1707. if (!can_open_delegated(delegation, fmode, claim)) {
  1708. rcu_read_unlock();
  1709. break;
  1710. }
  1711. /* Save the delegation */
  1712. nfs4_stateid_copy(&stateid, &delegation->stateid);
  1713. rcu_read_unlock();
  1714. nfs_release_seqid(opendata->o_arg.seqid);
  1715. if (!opendata->is_recover) {
  1716. ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
  1717. if (ret != 0)
  1718. goto out;
  1719. }
  1720. ret = -EAGAIN;
  1721. /* Try to update the stateid using the delegation */
  1722. if (update_open_stateid(state, NULL, &stateid, fmode))
  1723. goto out_return_state;
  1724. }
  1725. out:
  1726. return ERR_PTR(ret);
  1727. out_return_state:
  1728. refcount_inc(&state->count);
  1729. return state;
  1730. }
  1731. static void
  1732. nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
  1733. {
  1734. struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
  1735. struct nfs_delegation *delegation;
  1736. int delegation_flags = 0;
  1737. rcu_read_lock();
  1738. delegation = rcu_dereference(NFS_I(state->inode)->delegation);
  1739. if (delegation)
  1740. delegation_flags = delegation->flags;
  1741. rcu_read_unlock();
  1742. switch (data->o_arg.claim) {
  1743. default:
  1744. break;
  1745. case NFS4_OPEN_CLAIM_DELEGATE_CUR:
  1746. case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
  1747. pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
  1748. "returning a delegation for "
  1749. "OPEN(CLAIM_DELEGATE_CUR)\n",
  1750. clp->cl_hostname);
  1751. return;
  1752. }
  1753. if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
  1754. nfs_inode_set_delegation(state->inode,
  1755. data->owner->so_cred,
  1756. data->o_res.delegation_type,
  1757. &data->o_res.delegation,
  1758. data->o_res.pagemod_limit);
  1759. else
  1760. nfs_inode_reclaim_delegation(state->inode,
  1761. data->owner->so_cred,
  1762. data->o_res.delegation_type,
  1763. &data->o_res.delegation,
  1764. data->o_res.pagemod_limit);
  1765. if (data->o_res.do_recall)
  1766. nfs_async_inode_return_delegation(state->inode,
  1767. &data->o_res.delegation);
  1768. }
  1769. /*
  1770. * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
  1771. * and update the nfs4_state.
  1772. */
  1773. static struct nfs4_state *
  1774. _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
  1775. {
  1776. struct inode *inode = data->state->inode;
  1777. struct nfs4_state *state = data->state;
  1778. int ret;
  1779. if (!data->rpc_done) {
  1780. if (data->rpc_status)
  1781. return ERR_PTR(data->rpc_status);
  1782. return nfs4_try_open_cached(data);
  1783. }
  1784. ret = nfs_refresh_inode(inode, &data->f_attr);
  1785. if (ret)
  1786. return ERR_PTR(ret);
  1787. if (data->o_res.delegation_type != 0)
  1788. nfs4_opendata_check_deleg(data, state);
  1789. if (!update_open_stateid(state, &data->o_res.stateid,
  1790. NULL, data->o_arg.fmode))
  1791. return ERR_PTR(-EAGAIN);
  1792. refcount_inc(&state->count);
  1793. return state;
  1794. }
  1795. static struct inode *
  1796. nfs4_opendata_get_inode(struct nfs4_opendata *data)
  1797. {
  1798. struct inode *inode;
  1799. switch (data->o_arg.claim) {
  1800. case NFS4_OPEN_CLAIM_NULL:
  1801. case NFS4_OPEN_CLAIM_DELEGATE_CUR:
  1802. case NFS4_OPEN_CLAIM_DELEGATE_PREV:
  1803. if (!(data->f_attr.valid & NFS_ATTR_FATTR))
  1804. return ERR_PTR(-EAGAIN);
  1805. inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
  1806. &data->f_attr);
  1807. break;
  1808. default:
  1809. inode = d_inode(data->dentry);
  1810. ihold(inode);
  1811. nfs_refresh_inode(inode, &data->f_attr);
  1812. }
  1813. return inode;
  1814. }
  1815. static struct nfs4_state *
  1816. nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
  1817. {
  1818. struct nfs4_state *state;
  1819. struct inode *inode;
  1820. inode = nfs4_opendata_get_inode(data);
  1821. if (IS_ERR(inode))
  1822. return ERR_CAST(inode);
  1823. if (data->state != NULL && data->state->inode == inode) {
  1824. state = data->state;
  1825. refcount_inc(&state->count);
  1826. } else
  1827. state = nfs4_get_open_state(inode, data->owner);
  1828. iput(inode);
  1829. if (state == NULL)
  1830. state = ERR_PTR(-ENOMEM);
  1831. return state;
  1832. }
  1833. static struct nfs4_state *
  1834. _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
  1835. {
  1836. struct nfs4_state *state;
  1837. if (!data->rpc_done) {
  1838. state = nfs4_try_open_cached(data);
  1839. trace_nfs4_cached_open(data->state);
  1840. goto out;
  1841. }
  1842. state = nfs4_opendata_find_nfs4_state(data);
  1843. if (IS_ERR(state))
  1844. goto out;
  1845. if (data->o_res.delegation_type != 0)
  1846. nfs4_opendata_check_deleg(data, state);
  1847. if (!update_open_stateid(state, &data->o_res.stateid,
  1848. NULL, data->o_arg.fmode)) {
  1849. nfs4_put_open_state(state);
  1850. state = ERR_PTR(-EAGAIN);
  1851. }
  1852. out:
  1853. nfs_release_seqid(data->o_arg.seqid);
  1854. return state;
  1855. }
  1856. static struct nfs4_state *
  1857. nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
  1858. {
  1859. struct nfs4_state *ret;
  1860. if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1861. ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
  1862. else
  1863. ret = _nfs4_opendata_to_nfs4_state(data);
  1864. nfs4_sequence_free_slot(&data->o_res.seq_res);
  1865. return ret;
  1866. }
  1867. static struct nfs_open_context *
  1868. nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
  1869. {
  1870. struct nfs_inode *nfsi = NFS_I(state->inode);
  1871. struct nfs_open_context *ctx;
  1872. rcu_read_lock();
  1873. list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
  1874. if (ctx->state != state)
  1875. continue;
  1876. if ((ctx->mode & mode) != mode)
  1877. continue;
  1878. if (!get_nfs_open_context(ctx))
  1879. continue;
  1880. rcu_read_unlock();
  1881. return ctx;
  1882. }
  1883. rcu_read_unlock();
  1884. return ERR_PTR(-ENOENT);
  1885. }
  1886. static struct nfs_open_context *
  1887. nfs4_state_find_open_context(struct nfs4_state *state)
  1888. {
  1889. struct nfs_open_context *ctx;
  1890. ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
  1891. if (!IS_ERR(ctx))
  1892. return ctx;
  1893. ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
  1894. if (!IS_ERR(ctx))
  1895. return ctx;
  1896. return nfs4_state_find_open_context_mode(state, FMODE_READ);
  1897. }
  1898. static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
  1899. struct nfs4_state *state, enum open_claim_type4 claim)
  1900. {
  1901. struct nfs4_opendata *opendata;
  1902. opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
  1903. NULL, claim, GFP_NOFS);
  1904. if (opendata == NULL)
  1905. return ERR_PTR(-ENOMEM);
  1906. opendata->state = state;
  1907. refcount_inc(&state->count);
  1908. return opendata;
  1909. }
  1910. static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
  1911. fmode_t fmode)
  1912. {
  1913. struct nfs4_state *newstate;
  1914. struct nfs_server *server = NFS_SB(opendata->dentry->d_sb);
  1915. int openflags = opendata->o_arg.open_flags;
  1916. int ret;
  1917. if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
  1918. return 0;
  1919. opendata->o_arg.fmode = fmode;
  1920. opendata->o_arg.share_access =
  1921. nfs4_map_atomic_open_share(server, fmode, openflags);
  1922. memset(&opendata->o_res, 0, sizeof(opendata->o_res));
  1923. memset(&opendata->c_res, 0, sizeof(opendata->c_res));
  1924. nfs4_init_opendata_res(opendata);
  1925. ret = _nfs4_recover_proc_open(opendata);
  1926. if (ret != 0)
  1927. return ret;
  1928. newstate = nfs4_opendata_to_nfs4_state(opendata);
  1929. if (IS_ERR(newstate))
  1930. return PTR_ERR(newstate);
  1931. if (newstate != opendata->state)
  1932. ret = -ESTALE;
  1933. nfs4_close_state(newstate, fmode);
  1934. return ret;
  1935. }
  1936. static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
  1937. {
  1938. int ret;
  1939. /* memory barrier prior to reading state->n_* */
  1940. smp_rmb();
  1941. ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
  1942. if (ret != 0)
  1943. return ret;
  1944. ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
  1945. if (ret != 0)
  1946. return ret;
  1947. ret = nfs4_open_recover_helper(opendata, FMODE_READ);
  1948. if (ret != 0)
  1949. return ret;
  1950. /*
  1951. * We may have performed cached opens for all three recoveries.
  1952. * Check if we need to update the current stateid.
  1953. */
  1954. if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
  1955. !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
  1956. write_seqlock(&state->seqlock);
  1957. if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
  1958. nfs4_stateid_copy(&state->stateid, &state->open_stateid);
  1959. write_sequnlock(&state->seqlock);
  1960. }
  1961. return 0;
  1962. }
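/*
 * Note (added for clarity): recovery re-opens only the share modes the
 * state still has openers for (read/write, then write-only, then
 * read-only) via nfs4_open_recover_helper(), and finally resynchronises
 * state->stateid with the recovered open stateid when no delegation is
 * held. A helper that comes back with a different nfs4_state returns
 * -ESTALE so the caller treats the file as stale.
 */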
  1963. /*
  1964. * OPEN_RECLAIM:
  1965. * reclaim state on the server after a reboot.
  1966. */
  1967. static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
  1968. {
  1969. struct nfs_delegation *delegation;
  1970. struct nfs4_opendata *opendata;
  1971. fmode_t delegation_type = 0;
  1972. int status;
  1973. opendata = nfs4_open_recoverdata_alloc(ctx, state,
  1974. NFS4_OPEN_CLAIM_PREVIOUS);
  1975. if (IS_ERR(opendata))
  1976. return PTR_ERR(opendata);
  1977. rcu_read_lock();
  1978. delegation = rcu_dereference(NFS_I(state->inode)->delegation);
  1979. if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
  1980. delegation_type = delegation->type;
  1981. rcu_read_unlock();
  1982. opendata->o_arg.u.delegation_type = delegation_type;
  1983. status = nfs4_open_recover(opendata, state);
  1984. nfs4_opendata_put(opendata);
  1985. return status;
  1986. }
  1987. static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
  1988. {
  1989. struct nfs_server *server = NFS_SERVER(state->inode);
  1990. struct nfs4_exception exception = { };
  1991. int err;
  1992. do {
  1993. err = _nfs4_do_open_reclaim(ctx, state);
  1994. trace_nfs4_open_reclaim(ctx, 0, err);
  1995. if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
  1996. continue;
  1997. if (err != -NFS4ERR_DELAY)
  1998. break;
  1999. nfs4_handle_exception(server, err, &exception);
  2000. } while (exception.retry);
  2001. return err;
  2002. }
  2003. static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
  2004. {
  2005. struct nfs_open_context *ctx;
  2006. int ret;
  2007. ctx = nfs4_state_find_open_context(state);
  2008. if (IS_ERR(ctx))
  2009. return -EAGAIN;
  2010. clear_bit(NFS_DELEGATED_STATE, &state->flags);
  2011. nfs_state_clear_open_state_flags(state);
  2012. ret = nfs4_do_open_reclaim(ctx, state);
  2013. put_nfs_open_context(ctx);
  2014. return ret;
  2015. }
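/*
 * Map errors from a delegation recall onto a recovery action: -EAGAIN
 * tells the caller to retry once the scheduled recovery has run,
 * 0 (for -ENOMEM / -NFS4ERR_DENIED) gives up and may mark the lock as
 * lost, and anything else is passed straight back to the caller.
 */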
  2016. static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
  2017. {
  2018. switch (err) {
  2019. default:
  2020. printk(KERN_ERR "NFS: %s: unhandled error "
  2021. "%d.\n", __func__, err);
  2022. fallthrough;
  2023. case 0:
  2024. case -ENOENT:
  2025. case -EAGAIN:
  2026. case -ESTALE:
  2027. case -ETIMEDOUT:
  2028. break;
  2029. case -NFS4ERR_BADSESSION:
  2030. case -NFS4ERR_BADSLOT:
  2031. case -NFS4ERR_BAD_HIGH_SLOT:
  2032. case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
  2033. case -NFS4ERR_DEADSESSION:
  2034. return -EAGAIN;
  2035. case -NFS4ERR_STALE_CLIENTID:
  2036. case -NFS4ERR_STALE_STATEID:
  2037. /* Don't recall a delegation if it was lost */
  2038. nfs4_schedule_lease_recovery(server->nfs_client);
  2039. return -EAGAIN;
  2040. case -NFS4ERR_MOVED:
  2041. nfs4_schedule_migration_recovery(server);
  2042. return -EAGAIN;
  2043. case -NFS4ERR_LEASE_MOVED:
  2044. nfs4_schedule_lease_moved_recovery(server->nfs_client);
  2045. return -EAGAIN;
  2046. case -NFS4ERR_DELEG_REVOKED:
  2047. case -NFS4ERR_ADMIN_REVOKED:
  2048. case -NFS4ERR_EXPIRED:
  2049. case -NFS4ERR_BAD_STATEID:
  2050. case -NFS4ERR_OPENMODE:
  2051. nfs_inode_find_state_and_recover(state->inode,
  2052. stateid);
  2053. nfs4_schedule_stateid_recovery(server, state);
  2054. return -EAGAIN;
  2055. case -NFS4ERR_DELAY:
  2056. case -NFS4ERR_GRACE:
  2057. ssleep(1);
  2058. return -EAGAIN;
  2059. case -ENOMEM:
  2060. case -NFS4ERR_DENIED:
  2061. if (fl) {
  2062. struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
  2063. if (lsp)
  2064. set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
  2065. }
  2066. return 0;
  2067. }
  2068. return err;
  2069. }
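/*
 * Claim the delegated open modes back from the server using
 * CLAIM_DELEG_CUR_FH so the delegation being recalled can be returned
 * safely. Each share mode not already covered by an open stateid is
 * re-opened individually before the delegation is cleared locally.
 */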
  2070. int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
  2071. struct nfs4_state *state, const nfs4_stateid *stateid)
  2072. {
  2073. struct nfs_server *server = NFS_SERVER(state->inode);
  2074. struct nfs4_opendata *opendata;
  2075. int err = 0;
  2076. opendata = nfs4_open_recoverdata_alloc(ctx, state,
  2077. NFS4_OPEN_CLAIM_DELEG_CUR_FH);
  2078. if (IS_ERR(opendata))
  2079. return PTR_ERR(opendata);
  2080. nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
  2081. if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
  2082. err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
  2083. if (err)
  2084. goto out;
  2085. }
  2086. if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
  2087. err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
  2088. if (err)
  2089. goto out;
  2090. }
  2091. if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
  2092. err = nfs4_open_recover_helper(opendata, FMODE_READ);
  2093. if (err)
  2094. goto out;
  2095. }
  2096. nfs_state_clear_delegation(state);
  2097. out:
  2098. nfs4_opendata_put(opendata);
  2099. return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
  2100. }
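/*
 * OPEN_CONFIRM callbacks (only requested by NFSv4.0 servers that set
 * NFS4_OPEN_RESULT_CONFIRM): on success, copy the confirmed stateid
 * into the OPEN result and renew the lease.
 */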
  2101. static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
  2102. {
  2103. struct nfs4_opendata *data = calldata;
  2104. nfs4_setup_sequence(data->o_arg.server->nfs_client,
  2105. &data->c_arg.seq_args, &data->c_res.seq_res, task);
  2106. }
  2107. static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
  2108. {
  2109. struct nfs4_opendata *data = calldata;
  2110. nfs40_sequence_done(task, &data->c_res.seq_res);
  2111. data->rpc_status = task->tk_status;
  2112. if (data->rpc_status == 0) {
  2113. nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
  2114. nfs_confirm_seqid(&data->owner->so_seqid, 0);
  2115. renew_lease(data->o_res.server, data->timestamp);
  2116. data->rpc_done = true;
  2117. }
  2118. }
  2119. static void nfs4_open_confirm_release(void *calldata)
  2120. {
  2121. struct nfs4_opendata *data = calldata;
  2122. struct nfs4_state *state = NULL;
  2123. /* If this request hasn't been cancelled, do nothing */
  2124. if (!data->cancelled)
  2125. goto out_free;
  2126. /* In case of error, no cleanup! */
  2127. if (!data->rpc_done)
  2128. goto out_free;
  2129. state = nfs4_opendata_to_nfs4_state(data);
  2130. if (!IS_ERR(state))
  2131. nfs4_close_state(state, data->o_arg.fmode);
  2132. out_free:
  2133. nfs4_opendata_put(data);
  2134. }
  2135. static const struct rpc_call_ops nfs4_open_confirm_ops = {
  2136. .rpc_call_prepare = nfs4_open_confirm_prepare,
  2137. .rpc_call_done = nfs4_open_confirm_done,
  2138. .rpc_release = nfs4_open_confirm_release,
  2139. };
  2140. /*
  2141. * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
  2142. */
  2143. static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
  2144. {
  2145. struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
  2146. struct rpc_task *task;
  2147. struct rpc_message msg = {
  2148. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
  2149. .rpc_argp = &data->c_arg,
  2150. .rpc_resp = &data->c_res,
  2151. .rpc_cred = data->owner->so_cred,
  2152. };
  2153. struct rpc_task_setup task_setup_data = {
  2154. .rpc_client = server->client,
  2155. .rpc_message = &msg,
  2156. .callback_ops = &nfs4_open_confirm_ops,
  2157. .callback_data = data,
  2158. .workqueue = nfsiod_workqueue,
  2159. .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
  2160. };
  2161. int status;
  2162. nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1,
  2163. data->is_recover);
  2164. kref_get(&data->kref);
  2165. data->rpc_done = false;
  2166. data->rpc_status = 0;
  2167. data->timestamp = jiffies;
  2168. task = rpc_run_task(&task_setup_data);
  2169. if (IS_ERR(task))
  2170. return PTR_ERR(task);
  2171. status = rpc_wait_for_completion_task(task);
  2172. if (status != 0) {
  2173. data->cancelled = true;
  2174. smp_wmb();
  2175. } else
  2176. status = data->rpc_status;
  2177. rpc_put_task(task);
  2178. return status;
  2179. }
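/*
 * Prepare the asynchronous OPEN call: skip the RPC entirely if a
 * cached open or a valid delegation can satisfy the request, select
 * the NOATTR procedure for claim types that do not need attributes,
 * and pick the create mode based on the minor version and session
 * persistence.
 */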
  2180. static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
  2181. {
  2182. struct nfs4_opendata *data = calldata;
  2183. struct nfs4_state_owner *sp = data->owner;
  2184. struct nfs_client *clp = sp->so_server->nfs_client;
  2185. enum open_claim_type4 claim = data->o_arg.claim;
  2186. if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
  2187. goto out_wait;
  2188. /*
  2189. * Check if we still need to send an OPEN call, or if we can use
  2190. * a delegation instead.
  2191. */
  2192. if (data->state != NULL) {
  2193. struct nfs_delegation *delegation;
  2194. if (can_open_cached(data->state, data->o_arg.fmode,
  2195. data->o_arg.open_flags, claim))
  2196. goto out_no_action;
  2197. rcu_read_lock();
  2198. delegation = nfs4_get_valid_delegation(data->state->inode);
  2199. if (can_open_delegated(delegation, data->o_arg.fmode, claim))
  2200. goto unlock_no_action;
  2201. rcu_read_unlock();
  2202. }
  2203. /* Update client id. */
  2204. data->o_arg.clientid = clp->cl_clientid;
  2205. switch (claim) {
  2206. default:
  2207. break;
  2208. case NFS4_OPEN_CLAIM_PREVIOUS:
  2209. case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
  2210. case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
  2211. data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
  2212. fallthrough;
  2213. case NFS4_OPEN_CLAIM_FH:
  2214. task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
  2215. }
  2216. data->timestamp = jiffies;
  2217. if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
  2218. &data->o_arg.seq_args,
  2219. &data->o_res.seq_res,
  2220. task) != 0)
  2221. nfs_release_seqid(data->o_arg.seqid);
  2222. /* Set the create mode (note dependency on the session type) */
  2223. data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
  2224. if (data->o_arg.open_flags & O_EXCL) {
  2225. data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
  2226. if (clp->cl_mvops->minor_version == 0) {
  2227. data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
  2228. /* don't put an ACCESS op in OPEN compound if O_EXCL,
  2229. * because ACCESS will return permission denied for
  2230. * all bits until close */
  2231. data->o_res.access_request = data->o_arg.access = 0;
  2232. } else if (nfs4_has_persistent_session(clp))
  2233. data->o_arg.createmode = NFS4_CREATE_GUARDED;
  2234. }
  2235. return;
  2236. unlock_no_action:
  2237. trace_nfs4_cached_open(data->state);
  2238. rcu_read_unlock();
  2239. out_no_action:
  2240. task->tk_action = NULL;
  2241. out_wait:
  2242. nfs4_sequence_done(task, &data->o_res.seq_res);
  2243. }
  2244. static void nfs4_open_done(struct rpc_task *task, void *calldata)
  2245. {
  2246. struct nfs4_opendata *data = calldata;
  2247. data->rpc_status = task->tk_status;
  2248. if (!nfs4_sequence_process(task, &data->o_res.seq_res))
  2249. return;
  2250. if (task->tk_status == 0) {
  2251. if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
  2252. switch (data->o_res.f_attr->mode & S_IFMT) {
  2253. case S_IFREG:
  2254. break;
  2255. case S_IFLNK:
  2256. data->rpc_status = -ELOOP;
  2257. break;
  2258. case S_IFDIR:
  2259. data->rpc_status = -EISDIR;
  2260. break;
  2261. default:
  2262. data->rpc_status = -ENOTDIR;
  2263. }
  2264. }
  2265. renew_lease(data->o_res.server, data->timestamp);
  2266. if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
  2267. nfs_confirm_seqid(&data->owner->so_seqid, 0);
  2268. }
  2269. data->rpc_done = true;
  2270. }
  2271. static void nfs4_open_release(void *calldata)
  2272. {
  2273. struct nfs4_opendata *data = calldata;
  2274. struct nfs4_state *state = NULL;
  2275. /* If this request hasn't been cancelled, do nothing */
  2276. if (!data->cancelled)
  2277. goto out_free;
  2278. /* In case of error, no cleanup! */
  2279. if (data->rpc_status != 0 || !data->rpc_done)
  2280. goto out_free;
  2281. /* In case we need an open_confirm, no cleanup! */
  2282. if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
  2283. goto out_free;
  2284. state = nfs4_opendata_to_nfs4_state(data);
  2285. if (!IS_ERR(state))
  2286. nfs4_close_state(state, data->o_arg.fmode);
  2287. out_free:
  2288. nfs4_opendata_put(data);
  2289. }
  2290. static const struct rpc_call_ops nfs4_open_ops = {
  2291. .rpc_call_prepare = nfs4_open_prepare,
  2292. .rpc_call_done = nfs4_open_done,
  2293. .rpc_release = nfs4_open_release,
  2294. };
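/*
 * Issue the OPEN compound and wait for it to complete. A NULL @ctx
 * marks this as a state-recovery open: the sequence arguments are set
 * up for recovery, the task may time out, and no LAYOUTGET is prepared
 * alongside the OPEN.
 */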
  2295. static int nfs4_run_open_task(struct nfs4_opendata *data,
  2296. struct nfs_open_context *ctx)
  2297. {
  2298. struct inode *dir = d_inode(data->dir);
  2299. struct nfs_server *server = NFS_SERVER(dir);
  2300. struct nfs_openargs *o_arg = &data->o_arg;
  2301. struct nfs_openres *o_res = &data->o_res;
  2302. struct rpc_task *task;
  2303. struct rpc_message msg = {
  2304. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
  2305. .rpc_argp = o_arg,
  2306. .rpc_resp = o_res,
  2307. .rpc_cred = data->owner->so_cred,
  2308. };
  2309. struct rpc_task_setup task_setup_data = {
  2310. .rpc_client = server->client,
  2311. .rpc_message = &msg,
  2312. .callback_ops = &nfs4_open_ops,
  2313. .callback_data = data,
  2314. .workqueue = nfsiod_workqueue,
  2315. .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
  2316. };
  2317. int status;
  2318. if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
  2319. task_setup_data.flags |= RPC_TASK_MOVEABLE;
  2320. kref_get(&data->kref);
  2321. data->rpc_done = false;
  2322. data->rpc_status = 0;
  2323. data->cancelled = false;
  2324. data->is_recover = false;
  2325. if (!ctx) {
  2326. nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
  2327. data->is_recover = true;
  2328. task_setup_data.flags |= RPC_TASK_TIMEOUT;
  2329. } else {
  2330. nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
  2331. pnfs_lgopen_prepare(data, ctx);
  2332. }
  2333. task = rpc_run_task(&task_setup_data);
  2334. if (IS_ERR(task))
  2335. return PTR_ERR(task);
  2336. status = rpc_wait_for_completion_task(task);
  2337. if (status != 0) {
  2338. data->cancelled = true;
  2339. smp_wmb();
  2340. } else
  2341. status = data->rpc_status;
  2342. rpc_put_task(task);
  2343. return status;
  2344. }
  2345. static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
  2346. {
  2347. struct inode *dir = d_inode(data->dir);
  2348. struct nfs_openres *o_res = &data->o_res;
  2349. int status;
  2350. status = nfs4_run_open_task(data, NULL);
  2351. if (status != 0 || !data->rpc_done)
  2352. return status;
  2353. nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
  2354. if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
  2355. status = _nfs4_proc_open_confirm(data);
  2356. return status;
  2357. }
  2358. /*
  2359. * Additional permission checks in order to distinguish between an
  2360. * open for read, and an open for execute. This works around the
  2361. * fact that NFSv4 OPEN treats read and execute permissions as being
  2362. * the same.
  2363. * Note that in the non-execute case, we want to turn off permission
  2364. * checking if we just created a new file (POSIX open() semantics).
  2365. */
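/*
 * Sketch of the common case: an open performed on behalf of execve()
 * typically arrives here with __FMODE_EXEC set in the open flags, so
 * only NFS4_ACCESS_EXECUTE (or NFS4_ACCESS_LOOKUP for a directory) is
 * checked against the ACCESS results returned in the OPEN compound.
 */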
  2366. static int nfs4_opendata_access(const struct cred *cred,
  2367. struct nfs4_opendata *opendata,
  2368. struct nfs4_state *state, fmode_t fmode,
  2369. int openflags)
  2370. {
  2371. struct nfs_access_entry cache;
  2372. u32 mask, flags;
  2373. /* access call failed or for some reason the server doesn't
  2374. * support any access modes -- defer access call until later */
  2375. if (opendata->o_res.access_supported == 0)
  2376. return 0;
  2377. mask = 0;
  2378. /*
  2379. * Use openflags to check for exec, because fmode won't
2380. * always have FMODE_EXEC set when the file is opened for execute.
  2381. */
  2382. if (openflags & __FMODE_EXEC) {
  2383. /* ONLY check for exec rights */
  2384. if (S_ISDIR(state->inode->i_mode))
  2385. mask = NFS4_ACCESS_LOOKUP;
  2386. else
  2387. mask = NFS4_ACCESS_EXECUTE;
  2388. } else if ((fmode & FMODE_READ) && !opendata->file_created)
  2389. mask = NFS4_ACCESS_READ;
  2390. nfs_access_set_mask(&cache, opendata->o_res.access_result);
  2391. nfs_access_add_cache(state->inode, &cache, cred);
  2392. flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
  2393. if ((mask & ~cache.mask & flags) == 0)
  2394. return 0;
  2395. return -EACCES;
  2396. }
  2397. /*
  2398. * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
  2399. */
  2400. static int _nfs4_proc_open(struct nfs4_opendata *data,
  2401. struct nfs_open_context *ctx)
  2402. {
  2403. struct inode *dir = d_inode(data->dir);
  2404. struct nfs_server *server = NFS_SERVER(dir);
  2405. struct nfs_openargs *o_arg = &data->o_arg;
  2406. struct nfs_openres *o_res = &data->o_res;
  2407. int status;
  2408. status = nfs4_run_open_task(data, ctx);
  2409. if (!data->rpc_done)
  2410. return status;
  2411. if (status != 0) {
  2412. if (status == -NFS4ERR_BADNAME &&
  2413. !(o_arg->open_flags & O_CREAT))
  2414. return -ENOENT;
  2415. return status;
  2416. }
  2417. nfs_fattr_map_and_free_names(server, &data->f_attr);
  2418. if (o_arg->open_flags & O_CREAT) {
  2419. if (o_arg->open_flags & O_EXCL)
  2420. data->file_created = true;
  2421. else if (o_res->cinfo.before != o_res->cinfo.after)
  2422. data->file_created = true;
  2423. if (data->file_created ||
  2424. inode_peek_iversion_raw(dir) != o_res->cinfo.after)
  2425. nfs4_update_changeattr(dir, &o_res->cinfo,
  2426. o_res->f_attr->time_start,
  2427. NFS_INO_INVALID_DATA);
  2428. }
  2429. if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
  2430. server->caps &= ~NFS_CAP_POSIX_LOCK;
2431. if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
  2432. status = _nfs4_proc_open_confirm(data);
  2433. if (status != 0)
  2434. return status;
  2435. }
  2436. if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
  2437. struct nfs_fh *fh = &o_res->fh;
  2438. nfs4_sequence_free_slot(&o_res->seq_res);
  2439. if (o_arg->claim == NFS4_OPEN_CLAIM_FH)
  2440. fh = NFS_FH(d_inode(data->dentry));
  2441. nfs4_proc_getattr(server, fh, o_res->f_attr, NULL);
  2442. }
  2443. return 0;
  2444. }
  2445. /*
  2446. * OPEN_EXPIRED:
  2447. * reclaim state on the server after a network partition.
  2448. * Assumes caller holds the appropriate lock
  2449. */
  2450. static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
  2451. {
  2452. struct nfs4_opendata *opendata;
  2453. int ret;
  2454. opendata = nfs4_open_recoverdata_alloc(ctx, state, NFS4_OPEN_CLAIM_FH);
  2455. if (IS_ERR(opendata))
  2456. return PTR_ERR(opendata);
  2457. /*
  2458. * We're not recovering a delegation, so ask for no delegation.
  2459. * Otherwise the recovery thread could deadlock with an outstanding
  2460. * delegation return.
  2461. */
  2462. opendata->o_arg.open_flags = O_DIRECT;
  2463. ret = nfs4_open_recover(opendata, state);
  2464. if (ret == -ESTALE)
  2465. d_drop(ctx->dentry);
  2466. nfs4_opendata_put(opendata);
  2467. return ret;
  2468. }
  2469. static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
  2470. {
  2471. struct nfs_server *server = NFS_SERVER(state->inode);
  2472. struct nfs4_exception exception = { };
  2473. int err;
  2474. do {
  2475. err = _nfs4_open_expired(ctx, state);
  2476. trace_nfs4_open_expired(ctx, 0, err);
  2477. if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
  2478. continue;
  2479. switch (err) {
  2480. default:
  2481. goto out;
  2482. case -NFS4ERR_GRACE:
  2483. case -NFS4ERR_DELAY:
  2484. nfs4_handle_exception(server, err, &exception);
  2485. err = 0;
  2486. }
  2487. } while (exception.retry);
  2488. out:
  2489. return err;
  2490. }
  2491. static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
  2492. {
  2493. struct nfs_open_context *ctx;
  2494. int ret;
  2495. ctx = nfs4_state_find_open_context(state);
  2496. if (IS_ERR(ctx))
  2497. return -EAGAIN;
  2498. ret = nfs4_do_open_expired(ctx, state);
  2499. put_nfs_open_context(ctx);
  2500. return ret;
  2501. }
  2502. static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
  2503. const nfs4_stateid *stateid)
  2504. {
  2505. nfs_remove_bad_delegation(state->inode, stateid);
  2506. nfs_state_clear_delegation(state);
  2507. }
  2508. static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
  2509. {
  2510. if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
  2511. nfs_finish_clear_delegation_stateid(state, NULL);
  2512. }
  2513. static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
  2514. {
  2515. /* NFSv4.0 doesn't allow for delegation recovery on open expire */
  2516. nfs40_clear_delegation_stateid(state);
  2517. nfs_state_clear_open_state_flags(state);
  2518. return nfs4_open_expired(sp, state);
  2519. }
  2520. static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
  2521. nfs4_stateid *stateid,
  2522. const struct cred *cred)
  2523. {
  2524. return -NFS4ERR_BAD_STATEID;
  2525. }
  2526. #if defined(CONFIG_NFS_V4_1)
  2527. static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
  2528. nfs4_stateid *stateid,
  2529. const struct cred *cred)
  2530. {
  2531. int status;
  2532. switch (stateid->type) {
  2533. default:
  2534. break;
  2535. case NFS4_INVALID_STATEID_TYPE:
  2536. case NFS4_SPECIAL_STATEID_TYPE:
  2537. return -NFS4ERR_BAD_STATEID;
  2538. case NFS4_REVOKED_STATEID_TYPE:
  2539. goto out_free;
  2540. }
  2541. status = nfs41_test_stateid(server, stateid, cred);
  2542. switch (status) {
  2543. case -NFS4ERR_EXPIRED:
  2544. case -NFS4ERR_ADMIN_REVOKED:
  2545. case -NFS4ERR_DELEG_REVOKED:
  2546. break;
  2547. default:
  2548. return status;
  2549. }
  2550. out_free:
  2551. /* Ack the revoked state to the server */
  2552. nfs41_free_stateid(server, stateid, cred, true);
  2553. return -NFS4ERR_EXPIRED;
  2554. }
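/*
 * Test the delegation stateid held for this inode and clear the
 * delegation if the server reports it as expired or revoked. Only
 * delegations flagged NFS_DELEGATION_TEST_EXPIRED are checked.
 */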
  2555. static int nfs41_check_delegation_stateid(struct nfs4_state *state)
  2556. {
  2557. struct nfs_server *server = NFS_SERVER(state->inode);
  2558. nfs4_stateid stateid;
  2559. struct nfs_delegation *delegation;
  2560. const struct cred *cred = NULL;
  2561. int status, ret = NFS_OK;
  2562. /* Get the delegation credential for use by test/free_stateid */
  2563. rcu_read_lock();
  2564. delegation = rcu_dereference(NFS_I(state->inode)->delegation);
  2565. if (delegation == NULL) {
  2566. rcu_read_unlock();
  2567. nfs_state_clear_delegation(state);
  2568. return NFS_OK;
  2569. }
  2570. spin_lock(&delegation->lock);
  2571. nfs4_stateid_copy(&stateid, &delegation->stateid);
  2572. if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
  2573. &delegation->flags)) {
  2574. spin_unlock(&delegation->lock);
  2575. rcu_read_unlock();
  2576. return NFS_OK;
  2577. }
  2578. if (delegation->cred)
  2579. cred = get_cred(delegation->cred);
  2580. spin_unlock(&delegation->lock);
  2581. rcu_read_unlock();
  2582. status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
  2583. trace_nfs4_test_delegation_stateid(state, NULL, status);
  2584. if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
  2585. nfs_finish_clear_delegation_stateid(state, &stateid);
  2586. else
  2587. ret = status;
  2588. put_cred(cred);
  2589. return ret;
  2590. }
  2591. static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
  2592. {
  2593. nfs4_stateid tmp;
  2594. if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
  2595. nfs4_copy_delegation_stateid(state->inode, state->state,
  2596. &tmp, NULL) &&
  2597. nfs4_stateid_match_other(&state->stateid, &tmp))
  2598. nfs_state_set_delegation(state, &tmp, state->state);
  2599. else
  2600. nfs_state_clear_delegation(state);
  2601. }
  2602. /**
  2603. * nfs41_check_expired_locks - possibly free a lock stateid
  2604. *
  2605. * @state: NFSv4 state for an inode
  2606. *
  2607. * Returns NFS_OK if recovery for this stateid is now finished.
  2608. * Otherwise a negative NFS4ERR value is returned.
  2609. */
  2610. static int nfs41_check_expired_locks(struct nfs4_state *state)
  2611. {
  2612. int status, ret = NFS_OK;
  2613. struct nfs4_lock_state *lsp, *prev = NULL;
  2614. struct nfs_server *server = NFS_SERVER(state->inode);
  2615. if (!test_bit(LK_STATE_IN_USE, &state->flags))
  2616. goto out;
  2617. spin_lock(&state->state_lock);
  2618. list_for_each_entry(lsp, &state->lock_states, ls_locks) {
  2619. if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
  2620. const struct cred *cred = lsp->ls_state->owner->so_cred;
  2621. refcount_inc(&lsp->ls_count);
  2622. spin_unlock(&state->state_lock);
  2623. nfs4_put_lock_state(prev);
  2624. prev = lsp;
  2625. status = nfs41_test_and_free_expired_stateid(server,
  2626. &lsp->ls_stateid,
  2627. cred);
  2628. trace_nfs4_test_lock_stateid(state, lsp, status);
  2629. if (status == -NFS4ERR_EXPIRED ||
  2630. status == -NFS4ERR_BAD_STATEID) {
  2631. clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
  2632. lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
  2633. if (!recover_lost_locks)
  2634. set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
  2635. } else if (status != NFS_OK) {
  2636. ret = status;
  2637. nfs4_put_lock_state(prev);
  2638. goto out;
  2639. }
  2640. spin_lock(&state->state_lock);
  2641. }
  2642. }
  2643. spin_unlock(&state->state_lock);
  2644. nfs4_put_lock_state(prev);
  2645. out:
  2646. return ret;
  2647. }
  2648. /**
  2649. * nfs41_check_open_stateid - possibly free an open stateid
  2650. *
  2651. * @state: NFSv4 state for an inode
  2652. *
  2653. * Returns NFS_OK if recovery for this stateid is now finished.
  2654. * Otherwise a negative NFS4ERR value is returned.
  2655. */
  2656. static int nfs41_check_open_stateid(struct nfs4_state *state)
  2657. {
  2658. struct nfs_server *server = NFS_SERVER(state->inode);
  2659. nfs4_stateid *stateid = &state->open_stateid;
  2660. const struct cred *cred = state->owner->so_cred;
  2661. int status;
  2662. if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
  2663. return -NFS4ERR_BAD_STATEID;
  2664. status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
  2665. trace_nfs4_test_open_stateid(state, NULL, status);
  2666. if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
  2667. nfs_state_clear_open_state_flags(state);
  2668. stateid->type = NFS4_INVALID_STATEID_TYPE;
  2669. return status;
  2670. }
  2671. if (nfs_open_stateid_recover_openmode(state))
  2672. return -NFS4ERR_OPENMODE;
  2673. return NFS_OK;
  2674. }
  2675. static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
  2676. {
  2677. int status;
  2678. status = nfs41_check_delegation_stateid(state);
  2679. if (status != NFS_OK)
  2680. return status;
  2681. nfs41_delegation_recover_stateid(state);
  2682. status = nfs41_check_expired_locks(state);
  2683. if (status != NFS_OK)
  2684. return status;
  2685. status = nfs41_check_open_stateid(state);
  2686. if (status != NFS_OK)
  2687. status = nfs4_open_expired(sp, state);
  2688. return status;
  2689. }
  2690. #endif
  2691. /*
  2692. * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
  2693. * fields corresponding to attributes that were used to store the verifier.
  2694. * Make sure we clobber those fields in the later setattr call
  2695. */
  2696. static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
  2697. struct iattr *sattr, struct nfs4_label **label)
  2698. {
  2699. const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
  2700. __u32 attrset[3];
  2701. unsigned ret;
  2702. unsigned i;
  2703. for (i = 0; i < ARRAY_SIZE(attrset); i++) {
  2704. attrset[i] = opendata->o_res.attrset[i];
  2705. if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
  2706. attrset[i] &= ~bitmask[i];
  2707. }
  2708. ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
  2709. sattr->ia_valid : 0;
  2710. if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
  2711. if (sattr->ia_valid & ATTR_ATIME_SET)
  2712. ret |= ATTR_ATIME_SET;
  2713. else
  2714. ret |= ATTR_ATIME;
  2715. }
  2716. if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
  2717. if (sattr->ia_valid & ATTR_MTIME_SET)
  2718. ret |= ATTR_MTIME_SET;
  2719. else
  2720. ret |= ATTR_MTIME;
  2721. }
  2722. if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
  2723. *label = NULL;
  2724. return ret;
  2725. }
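/*
 * Run the OPEN compound, convert the result into a referenced
 * nfs4_state, splice the dentry into place if it was negative, and
 * check the ACCESS results returned in the compound before handing
 * the state back via @ctx.
 */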
  2726. static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
  2727. int flags, struct nfs_open_context *ctx)
  2728. {
  2729. struct nfs4_state_owner *sp = opendata->owner;
  2730. struct nfs_server *server = sp->so_server;
  2731. struct dentry *dentry;
  2732. struct nfs4_state *state;
  2733. fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
  2734. struct inode *dir = d_inode(opendata->dir);
  2735. unsigned long dir_verifier;
  2736. unsigned int seq;
  2737. int ret;
  2738. seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
  2739. dir_verifier = nfs_save_change_attribute(dir);
  2740. ret = _nfs4_proc_open(opendata, ctx);
  2741. if (ret != 0)
  2742. goto out;
  2743. state = _nfs4_opendata_to_nfs4_state(opendata);
  2744. ret = PTR_ERR(state);
  2745. if (IS_ERR(state))
  2746. goto out;
  2747. ctx->state = state;
  2748. if (server->caps & NFS_CAP_POSIX_LOCK)
  2749. set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
  2750. if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
  2751. set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
  2752. if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED)
  2753. set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags);
  2754. dentry = opendata->dentry;
  2755. if (d_really_is_negative(dentry)) {
  2756. struct dentry *alias;
  2757. d_drop(dentry);
  2758. alias = d_exact_alias(dentry, state->inode);
  2759. if (!alias)
  2760. alias = d_splice_alias(igrab(state->inode), dentry);
  2761. /* d_splice_alias() can't fail here - it's a non-directory */
  2762. if (alias) {
  2763. dput(ctx->dentry);
  2764. ctx->dentry = dentry = alias;
  2765. }
  2766. }
2767. switch (opendata->o_arg.claim) {
  2768. default:
  2769. break;
  2770. case NFS4_OPEN_CLAIM_NULL:
  2771. case NFS4_OPEN_CLAIM_DELEGATE_CUR:
  2772. case NFS4_OPEN_CLAIM_DELEGATE_PREV:
  2773. if (!opendata->rpc_done)
  2774. break;
  2775. if (opendata->o_res.delegation_type != 0)
  2776. dir_verifier = nfs_save_change_attribute(dir);
  2777. nfs_set_verifier(dentry, dir_verifier);
  2778. }
  2779. /* Parse layoutget results before we check for access */
  2780. pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
  2781. ret = nfs4_opendata_access(sp->so_cred, opendata, state,
  2782. acc_mode, flags);
  2783. if (ret != 0)
  2784. goto out;
  2785. if (d_inode(dentry) == state->inode) {
  2786. nfs_inode_attach_open_context(ctx);
  2787. if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
  2788. nfs4_schedule_stateid_recovery(server, state);
  2789. }
  2790. out:
  2791. if (!opendata->cancelled) {
  2792. if (opendata->lgp) {
  2793. nfs4_lgopen_release(opendata->lgp);
  2794. opendata->lgp = NULL;
  2795. }
  2796. nfs4_sequence_free_slot(&opendata->o_res.seq_res);
  2797. }
  2798. return ret;
  2799. }
  2800. /*
  2801. * Returns a referenced nfs4_state
  2802. */
  2803. static int _nfs4_do_open(struct inode *dir,
  2804. struct nfs_open_context *ctx,
  2805. int flags,
  2806. const struct nfs4_open_createattrs *c,
  2807. int *opened)
  2808. {
  2809. struct nfs4_state_owner *sp;
  2810. struct nfs4_state *state = NULL;
  2811. struct nfs_server *server = NFS_SERVER(dir);
  2812. struct nfs4_opendata *opendata;
  2813. struct dentry *dentry = ctx->dentry;
  2814. const struct cred *cred = ctx->cred;
  2815. struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
  2816. fmode_t fmode = _nfs4_ctx_to_openmode(ctx);
  2817. enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
  2818. struct iattr *sattr = c->sattr;
  2819. struct nfs4_label *label = c->label;
  2820. int status;
  2821. /* Protect against reboot recovery conflicts */
  2822. status = -ENOMEM;
  2823. sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
  2824. if (sp == NULL) {
  2825. dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
  2826. goto out_err;
  2827. }
  2828. status = nfs4_client_recover_expired_lease(server->nfs_client);
  2829. if (status != 0)
  2830. goto err_put_state_owner;
  2831. if (d_really_is_positive(dentry))
  2832. nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
  2833. status = -ENOMEM;
  2834. if (d_really_is_positive(dentry))
  2835. claim = NFS4_OPEN_CLAIM_FH;
  2836. opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
  2837. c, claim, GFP_KERNEL);
  2838. if (opendata == NULL)
  2839. goto err_put_state_owner;
  2840. if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
  2841. if (!opendata->f_attr.mdsthreshold) {
  2842. opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
  2843. if (!opendata->f_attr.mdsthreshold)
  2844. goto err_opendata_put;
  2845. }
  2846. opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
  2847. }
  2848. if (d_really_is_positive(dentry))
  2849. opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
  2850. status = _nfs4_open_and_get_state(opendata, flags, ctx);
  2851. if (status != 0)
  2852. goto err_opendata_put;
  2853. state = ctx->state;
  2854. if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
  2855. (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
  2856. unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
  2857. /*
2858. * Send the create attributes that were not set by the OPEN,
2859. * using an extra SETATTR.
  2860. */
  2861. if (attrs || label) {
  2862. unsigned ia_old = sattr->ia_valid;
  2863. sattr->ia_valid = attrs;
  2864. nfs_fattr_init(opendata->o_res.f_attr);
  2865. status = nfs4_do_setattr(state->inode, cred,
  2866. opendata->o_res.f_attr, sattr,
  2867. ctx, label);
  2868. if (status == 0) {
  2869. nfs_setattr_update_inode(state->inode, sattr,
  2870. opendata->o_res.f_attr);
  2871. nfs_setsecurity(state->inode, opendata->o_res.f_attr);
  2872. }
  2873. sattr->ia_valid = ia_old;
  2874. }
  2875. }
  2876. if (opened && opendata->file_created)
  2877. *opened = 1;
  2878. if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
  2879. *ctx_th = opendata->f_attr.mdsthreshold;
  2880. opendata->f_attr.mdsthreshold = NULL;
  2881. }
  2882. nfs4_opendata_put(opendata);
  2883. nfs4_put_state_owner(sp);
  2884. return 0;
  2885. err_opendata_put:
  2886. nfs4_opendata_put(opendata);
  2887. err_put_state_owner:
  2888. nfs4_put_state_owner(sp);
  2889. out_err:
  2890. return status;
  2891. }
  2892. static struct nfs4_state *nfs4_do_open(struct inode *dir,
  2893. struct nfs_open_context *ctx,
  2894. int flags,
  2895. struct iattr *sattr,
  2896. struct nfs4_label *label,
  2897. int *opened)
  2898. {
  2899. struct nfs_server *server = NFS_SERVER(dir);
  2900. struct nfs4_exception exception = {
  2901. .interruptible = true,
  2902. };
  2903. struct nfs4_state *res;
  2904. struct nfs4_open_createattrs c = {
  2905. .label = label,
  2906. .sattr = sattr,
  2907. .verf = {
  2908. [0] = (__u32)jiffies,
  2909. [1] = (__u32)current->pid,
  2910. },
  2911. };
  2912. int status;
  2913. do {
  2914. status = _nfs4_do_open(dir, ctx, flags, &c, opened);
  2915. res = ctx->state;
  2916. trace_nfs4_open_file(ctx, flags, status);
  2917. if (status == 0)
  2918. break;
  2919. /* NOTE: BAD_SEQID means the server and client disagree about the
  2920. * book-keeping w.r.t. state-changing operations
  2921. * (OPEN/CLOSE/LOCK/LOCKU...)
  2922. * It is actually a sign of a bug on the client or on the server.
  2923. *
  2924. * If we receive a BAD_SEQID error in the particular case of
  2925. * doing an OPEN, we assume that nfs_increment_open_seqid() will
  2926. * have unhashed the old state_owner for us, and that we can
  2927. * therefore safely retry using a new one. We should still warn
  2928. * the user though...
  2929. */
  2930. if (status == -NFS4ERR_BAD_SEQID) {
2931. pr_warn_ratelimited("NFS: v4 server %s "
2932. "returned a bad sequence-id error!\n",
  2933. NFS_SERVER(dir)->nfs_client->cl_hostname);
  2934. exception.retry = 1;
  2935. continue;
  2936. }
  2937. /*
  2938. * BAD_STATEID on OPEN means that the server cancelled our
  2939. * state before it received the OPEN_CONFIRM.
  2940. * Recover by retrying the request as per the discussion
  2941. * on Page 181 of RFC3530.
  2942. */
  2943. if (status == -NFS4ERR_BAD_STATEID) {
  2944. exception.retry = 1;
  2945. continue;
  2946. }
  2947. if (status == -NFS4ERR_EXPIRED) {
  2948. nfs4_schedule_lease_recovery(server->nfs_client);
  2949. exception.retry = 1;
  2950. continue;
  2951. }
  2952. if (status == -EAGAIN) {
  2953. /* We must have found a delegation */
  2954. exception.retry = 1;
  2955. continue;
  2956. }
  2957. if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
  2958. continue;
  2959. res = ERR_PTR(nfs4_handle_exception(server,
  2960. status, &exception));
  2961. } while (exception.retry);
  2962. return res;
  2963. }
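/*
 * Low-level SETATTR. Only size-changing (truncate) requests carry an
 * open or delegation stateid, preferring a write delegation stateid,
 * then the open/lock stateid from @ctx; everything else is sent with
 * the zero stateid, since servers should only apply open mode checks
 * to size changes.
 */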
  2964. static int _nfs4_do_setattr(struct inode *inode,
  2965. struct nfs_setattrargs *arg,
  2966. struct nfs_setattrres *res,
  2967. const struct cred *cred,
  2968. struct nfs_open_context *ctx)
  2969. {
  2970. struct nfs_server *server = NFS_SERVER(inode);
  2971. struct rpc_message msg = {
  2972. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
  2973. .rpc_argp = arg,
  2974. .rpc_resp = res,
  2975. .rpc_cred = cred,
  2976. };
  2977. const struct cred *delegation_cred = NULL;
  2978. unsigned long timestamp = jiffies;
  2979. bool truncate;
  2980. int status;
  2981. nfs_fattr_init(res->fattr);
  2982. /* Servers should only apply open mode checks for file size changes */
  2983. truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
  2984. if (!truncate) {
  2985. nfs4_inode_make_writeable(inode);
  2986. goto zero_stateid;
  2987. }
  2988. if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
  2989. /* Use that stateid */
  2990. } else if (ctx != NULL && ctx->state) {
  2991. struct nfs_lock_context *l_ctx;
  2992. if (!nfs4_valid_open_stateid(ctx->state))
  2993. return -EBADF;
  2994. l_ctx = nfs_get_lock_context(ctx);
  2995. if (IS_ERR(l_ctx))
  2996. return PTR_ERR(l_ctx);
  2997. status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
  2998. &arg->stateid, &delegation_cred);
  2999. nfs_put_lock_context(l_ctx);
  3000. if (status == -EIO)
  3001. return -EBADF;
  3002. else if (status == -EAGAIN)
  3003. goto zero_stateid;
  3004. } else {
  3005. zero_stateid:
  3006. nfs4_stateid_copy(&arg->stateid, &zero_stateid);
  3007. }
  3008. if (delegation_cred)
  3009. msg.rpc_cred = delegation_cred;
  3010. status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
  3011. put_cred(delegation_cred);
  3012. if (status == 0 && ctx != NULL)
  3013. renew_lease(server, timestamp);
  3014. trace_nfs4_setattr(inode, &arg->stateid, status);
  3015. return status;
  3016. }
  3017. static int nfs4_do_setattr(struct inode *inode, const struct cred *cred,
  3018. struct nfs_fattr *fattr, struct iattr *sattr,
  3019. struct nfs_open_context *ctx, struct nfs4_label *ilabel)
  3020. {
  3021. struct nfs_server *server = NFS_SERVER(inode);
  3022. __u32 bitmask[NFS4_BITMASK_SZ];
  3023. struct nfs4_state *state = ctx ? ctx->state : NULL;
  3024. struct nfs_setattrargs arg = {
  3025. .fh = NFS_FH(inode),
  3026. .iap = sattr,
  3027. .server = server,
  3028. .bitmask = bitmask,
  3029. .label = ilabel,
  3030. };
  3031. struct nfs_setattrres res = {
  3032. .fattr = fattr,
  3033. .server = server,
  3034. };
  3035. struct nfs4_exception exception = {
  3036. .state = state,
  3037. .inode = inode,
  3038. .stateid = &arg.stateid,
  3039. };
  3040. unsigned long adjust_flags = NFS_INO_INVALID_CHANGE;
  3041. int err;
  3042. if (sattr->ia_valid & (ATTR_MODE | ATTR_KILL_SUID | ATTR_KILL_SGID))
  3043. adjust_flags |= NFS_INO_INVALID_MODE;
  3044. if (sattr->ia_valid & (ATTR_UID | ATTR_GID))
  3045. adjust_flags |= NFS_INO_INVALID_OTHER;
  3046. do {
  3047. nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label),
  3048. inode, adjust_flags);
  3049. err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
  3050. switch (err) {
  3051. case -NFS4ERR_OPENMODE:
  3052. if (!(sattr->ia_valid & ATTR_SIZE)) {
  3053. pr_warn_once("NFSv4: server %s is incorrectly "
  3054. "applying open mode checks to "
  3055. "a SETATTR that is not "
  3056. "changing file size.\n",
  3057. server->nfs_client->cl_hostname);
  3058. }
  3059. if (state && !(state->state & FMODE_WRITE)) {
  3060. err = -EBADF;
  3061. if (sattr->ia_valid & ATTR_OPEN)
  3062. err = -EACCES;
  3063. goto out;
  3064. }
  3065. }
  3066. err = nfs4_handle_exception(server, err, &exception);
  3067. } while (exception.retry);
  3068. out:
  3069. return err;
  3070. }
  3071. static bool
  3072. nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
  3073. {
  3074. if (inode == NULL || !nfs_have_layout(inode))
  3075. return false;
  3076. return pnfs_wait_on_layoutreturn(inode, task);
  3077. }
  3078. /*
  3079. * Update the seqid of an open stateid
  3080. */
  3081. static void nfs4_sync_open_stateid(nfs4_stateid *dst,
  3082. struct nfs4_state *state)
  3083. {
  3084. __be32 seqid_open;
  3085. u32 dst_seqid;
  3086. int seq;
  3087. for (;;) {
  3088. if (!nfs4_valid_open_stateid(state))
  3089. break;
  3090. seq = read_seqbegin(&state->seqlock);
  3091. if (!nfs4_state_match_open_stateid_other(state, dst)) {
  3092. nfs4_stateid_copy(dst, &state->open_stateid);
  3093. if (read_seqretry(&state->seqlock, seq))
  3094. continue;
  3095. break;
  3096. }
  3097. seqid_open = state->open_stateid.seqid;
  3098. if (read_seqretry(&state->seqlock, seq))
  3099. continue;
  3100. dst_seqid = be32_to_cpu(dst->seqid);
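/*
 * The signed comparison copes with seqid wraparound: for example, with
 * dst_seqid == 5 and seqid_open == 7, (s32)(5 - 7) < 0, so dst is
 * behind and takes the newer seqid.
 */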
  3101. if ((s32)(dst_seqid - be32_to_cpu(seqid_open)) < 0)
  3102. dst->seqid = seqid_open;
  3103. break;
  3104. }
  3105. }
  3106. /*
  3107. * Update the seqid of an open stateid after receiving
  3108. * NFS4ERR_OLD_STATEID
  3109. */
  3110. static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
  3111. struct nfs4_state *state)
  3112. {
  3113. __be32 seqid_open;
  3114. u32 dst_seqid;
  3115. bool ret;
  3116. int seq, status = -EAGAIN;
  3117. DEFINE_WAIT(wait);
  3118. for (;;) {
  3119. ret = false;
  3120. if (!nfs4_valid_open_stateid(state))
  3121. break;
  3122. seq = read_seqbegin(&state->seqlock);
  3123. if (!nfs4_state_match_open_stateid_other(state, dst)) {
  3124. if (read_seqretry(&state->seqlock, seq))
  3125. continue;
  3126. break;
  3127. }
  3128. write_seqlock(&state->seqlock);
  3129. seqid_open = state->open_stateid.seqid;
  3130. dst_seqid = be32_to_cpu(dst->seqid);
  3131. /* Did another OPEN bump the state's seqid? try again: */
  3132. if ((s32)(be32_to_cpu(seqid_open) - dst_seqid) > 0) {
  3133. dst->seqid = seqid_open;
  3134. write_sequnlock(&state->seqlock);
  3135. ret = true;
  3136. break;
  3137. }
  3138. /* server says we're behind but we haven't seen the update yet */
  3139. set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
  3140. prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
  3141. write_sequnlock(&state->seqlock);
  3142. trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
  3143. if (fatal_signal_pending(current))
  3144. status = -EINTR;
3145. else if (schedule_timeout(5*HZ) != 0)
  3147. status = 0;
  3148. finish_wait(&state->waitq, &wait);
  3149. if (!status)
  3150. continue;
  3151. if (status == -EINTR)
  3152. break;
  3153. /* we slept the whole 5 seconds, we must have lost a seqid */
  3154. dst->seqid = cpu_to_be32(dst_seqid + 1);
  3155. ret = true;
  3156. break;
  3157. }
  3158. return ret;
  3159. }
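/*
 * Per-CLOSE call data: the inode and state being closed, the CLOSE or
 * OPEN_DOWNGRADE arguments and results, and optional return-on-close
 * layoutreturn state.
 */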
  3160. struct nfs4_closedata {
  3161. struct inode *inode;
  3162. struct nfs4_state *state;
  3163. struct nfs_closeargs arg;
  3164. struct nfs_closeres res;
  3165. struct {
  3166. struct nfs4_layoutreturn_args arg;
  3167. struct nfs4_layoutreturn_res res;
  3168. struct nfs4_xdr_opaque_data ld_private;
  3169. u32 roc_barrier;
  3170. bool roc;
  3171. } lr;
  3172. struct nfs_fattr fattr;
  3173. unsigned long timestamp;
  3174. };
  3175. static void nfs4_free_closedata(void *data)
  3176. {
  3177. struct nfs4_closedata *calldata = data;
  3178. struct nfs4_state_owner *sp = calldata->state->owner;
  3179. struct super_block *sb = calldata->state->inode->i_sb;
  3180. if (calldata->lr.roc)
  3181. pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
  3182. calldata->res.lr_ret);
  3183. nfs4_put_open_state(calldata->state);
  3184. nfs_free_seqid(calldata->arg.seqid);
  3185. nfs4_put_state_owner(sp);
  3186. nfs_sb_deactive(sb);
  3187. kfree(calldata);
  3188. }
  3189. static void nfs4_close_done(struct rpc_task *task, void *data)
  3190. {
  3191. struct nfs4_closedata *calldata = data;
  3192. struct nfs4_state *state = calldata->state;
  3193. struct nfs_server *server = NFS_SERVER(calldata->inode);
  3194. nfs4_stateid *res_stateid = NULL;
  3195. struct nfs4_exception exception = {
  3196. .state = state,
  3197. .inode = calldata->inode,
  3198. .stateid = &calldata->arg.stateid,
  3199. };
  3200. if (!nfs4_sequence_done(task, &calldata->res.seq_res))
  3201. return;
  3202. trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
  3203. /* Handle Layoutreturn errors */
  3204. if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
  3205. &calldata->res.lr_ret) == -EAGAIN)
  3206. goto out_restart;
3207. /* We are done with the inode and in the process of freeing
3208. * the state_owner. Keep this data around to process errors.
  3209. */
  3210. switch (task->tk_status) {
  3211. case 0:
  3212. res_stateid = &calldata->res.stateid;
  3213. renew_lease(server, calldata->timestamp);
  3214. break;
  3215. case -NFS4ERR_ACCESS:
  3216. if (calldata->arg.bitmask != NULL) {
  3217. calldata->arg.bitmask = NULL;
  3218. calldata->res.fattr = NULL;
  3219. goto out_restart;
  3220. }
  3221. break;
  3222. case -NFS4ERR_OLD_STATEID:
  3223. /* Did we race with OPEN? */
  3224. if (nfs4_refresh_open_old_stateid(&calldata->arg.stateid,
  3225. state))
  3226. goto out_restart;
  3227. goto out_release;
  3228. case -NFS4ERR_ADMIN_REVOKED:
  3229. case -NFS4ERR_STALE_STATEID:
  3230. case -NFS4ERR_EXPIRED:
  3231. nfs4_free_revoked_stateid(server,
  3232. &calldata->arg.stateid,
  3233. task->tk_msg.rpc_cred);
  3234. fallthrough;
  3235. case -NFS4ERR_BAD_STATEID:
  3236. if (calldata->arg.fmode == 0)
  3237. break;
  3238. fallthrough;
  3239. default:
  3240. task->tk_status = nfs4_async_handle_exception(task,
  3241. server, task->tk_status, &exception);
  3242. if (exception.retry)
  3243. goto out_restart;
  3244. }
  3245. nfs_clear_open_stateid(state, &calldata->arg.stateid,
  3246. res_stateid, calldata->arg.fmode);
  3247. out_release:
  3248. task->tk_status = 0;
  3249. nfs_release_seqid(calldata->arg.seqid);
  3250. nfs_refresh_inode(calldata->inode, &calldata->fattr);
  3251. dprintk("%s: ret = %d\n", __func__, task->tk_status);
  3252. return;
  3253. out_restart:
  3254. task->tk_status = 0;
  3255. rpc_restart_call_prepare(task);
  3256. goto out_release;
  3257. }
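/*
 * Decide between CLOSE and OPEN_DOWNGRADE by comparing the share modes
 * still in use (n_rdonly/n_wronly/n_rdwr) against the modes held on
 * the open stateid. For example, if only readers remain on a stateid
 * that also holds write access, the call is downgraded to FMODE_READ
 * rather than closed.
 */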
  3258. static void nfs4_close_prepare(struct rpc_task *task, void *data)
  3259. {
  3260. struct nfs4_closedata *calldata = data;
  3261. struct nfs4_state *state = calldata->state;
  3262. struct inode *inode = calldata->inode;
  3263. struct nfs_server *server = NFS_SERVER(inode);
  3264. struct pnfs_layout_hdr *lo;
  3265. bool is_rdonly, is_wronly, is_rdwr;
  3266. int call_close = 0;
  3267. if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
  3268. goto out_wait;
  3269. task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
  3270. spin_lock(&state->owner->so_lock);
  3271. is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
  3272. is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
  3273. is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
  3274. /* Calculate the change in open mode */
  3275. calldata->arg.fmode = 0;
  3276. if (state->n_rdwr == 0) {
  3277. if (state->n_rdonly == 0)
  3278. call_close |= is_rdonly;
  3279. else if (is_rdonly)
  3280. calldata->arg.fmode |= FMODE_READ;
  3281. if (state->n_wronly == 0)
  3282. call_close |= is_wronly;
  3283. else if (is_wronly)
  3284. calldata->arg.fmode |= FMODE_WRITE;
  3285. if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
  3286. call_close |= is_rdwr;
  3287. } else if (is_rdwr)
  3288. calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
  3289. nfs4_sync_open_stateid(&calldata->arg.stateid, state);
  3290. if (!nfs4_valid_open_stateid(state))
  3291. call_close = 0;
  3292. spin_unlock(&state->owner->so_lock);
  3293. if (!call_close) {
  3294. /* Note: exit _without_ calling nfs4_close_done */
  3295. goto out_no_action;
  3296. }
  3297. if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
  3298. nfs_release_seqid(calldata->arg.seqid);
  3299. goto out_wait;
  3300. }
  3301. lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
  3302. if (lo && !pnfs_layout_is_valid(lo)) {
  3303. calldata->arg.lr_args = NULL;
  3304. calldata->res.lr_res = NULL;
  3305. }
  3306. if (calldata->arg.fmode == 0)
  3307. task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
  3308. if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
  3309. /* Close-to-open cache consistency revalidation */
  3310. if (!nfs4_have_delegation(inode, FMODE_READ)) {
  3311. nfs4_bitmask_set(calldata->arg.bitmask_store,
  3312. server->cache_consistency_bitmask,
  3313. inode, 0);
  3314. calldata->arg.bitmask = calldata->arg.bitmask_store;
  3315. } else
  3316. calldata->arg.bitmask = NULL;
  3317. }
  3318. calldata->arg.share_access =
  3319. nfs4_map_atomic_open_share(NFS_SERVER(inode),
  3320. calldata->arg.fmode, 0);
  3321. if (calldata->res.fattr == NULL)
  3322. calldata->arg.bitmask = NULL;
  3323. else if (calldata->arg.bitmask == NULL)
  3324. calldata->res.fattr = NULL;
  3325. calldata->timestamp = jiffies;
  3326. if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
  3327. &calldata->arg.seq_args,
  3328. &calldata->res.seq_res,
  3329. task) != 0)
  3330. nfs_release_seqid(calldata->arg.seqid);
  3331. return;
  3332. out_no_action:
  3333. task->tk_action = NULL;
  3334. out_wait:
  3335. nfs4_sequence_done(task, &calldata->res.seq_res);
  3336. }
  3337. static const struct rpc_call_ops nfs4_close_ops = {
  3338. .rpc_call_prepare = nfs4_close_prepare,
  3339. .rpc_call_done = nfs4_close_done,
  3340. .rpc_release = nfs4_free_closedata,
  3341. };
  3342. /*
  3343. * It is possible for data to be read/written from a mem-mapped file
  3344. * after the sys_close call (which hits the vfs layer as a flush).
  3345. * This means that we can't safely call nfsv4 close on a file until
  3346. * the inode is cleared. This in turn means that we are not good
  3347. * NFSv4 citizens - we do not indicate to the server to update the file's
  3348. * share state even when we are done with one of the three share
3349. * stateids in the inode.
  3350. *
  3351. * NOTE: Caller must be holding the sp->so_owner semaphore!
  3352. */
  3353. int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
  3354. {
  3355. struct nfs_server *server = NFS_SERVER(state->inode);
  3356. struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
  3357. struct nfs4_closedata *calldata;
  3358. struct nfs4_state_owner *sp = state->owner;
  3359. struct rpc_task *task;
  3360. struct rpc_message msg = {
  3361. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
  3362. .rpc_cred = state->owner->so_cred,
  3363. };
  3364. struct rpc_task_setup task_setup_data = {
  3365. .rpc_client = server->client,
  3366. .rpc_message = &msg,
  3367. .callback_ops = &nfs4_close_ops,
  3368. .workqueue = nfsiod_workqueue,
  3369. .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
  3370. };
  3371. int status = -ENOMEM;
  3372. if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
  3373. task_setup_data.flags |= RPC_TASK_MOVEABLE;
  3374. nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
  3375. &task_setup_data.rpc_client, &msg);
  3376. calldata = kzalloc(sizeof(*calldata), gfp_mask);
  3377. if (calldata == NULL)
  3378. goto out;
  3379. nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1, 0);
  3380. calldata->inode = state->inode;
  3381. calldata->state = state;
  3382. calldata->arg.fh = NFS_FH(state->inode);
  3383. if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
  3384. goto out_free_calldata;
  3385. /* Serialization for the sequence id */
  3386. alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
  3387. calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
  3388. if (IS_ERR(calldata->arg.seqid))
  3389. goto out_free_calldata;
  3390. nfs_fattr_init(&calldata->fattr);
  3391. calldata->arg.fmode = 0;
  3392. calldata->lr.arg.ld_private = &calldata->lr.ld_private;
  3393. calldata->res.fattr = &calldata->fattr;
  3394. calldata->res.seqid = calldata->arg.seqid;
  3395. calldata->res.server = server;
  3396. calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
  3397. calldata->lr.roc = pnfs_roc(state->inode,
  3398. &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
  3399. if (calldata->lr.roc) {
  3400. calldata->arg.lr_args = &calldata->lr.arg;
  3401. calldata->res.lr_res = &calldata->lr.res;
  3402. }
  3403. nfs_sb_active(calldata->inode->i_sb);
  3404. msg.rpc_argp = &calldata->arg;
  3405. msg.rpc_resp = &calldata->res;
  3406. task_setup_data.callback_data = calldata;
  3407. task = rpc_run_task(&task_setup_data);
  3408. if (IS_ERR(task))
  3409. return PTR_ERR(task);
  3410. status = 0;
  3411. if (wait)
  3412. status = rpc_wait_for_completion_task(task);
  3413. rpc_put_task(task);
  3414. return status;
  3415. out_free_calldata:
  3416. kfree(calldata);
  3417. out:
  3418. nfs4_put_open_state(state);
  3419. nfs4_put_state_owner(sp);
  3420. return status;
  3421. }
  3422. static struct inode *
  3423. nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
  3424. int open_flags, struct iattr *attr, int *opened)
  3425. {
  3426. struct nfs4_state *state;
  3427. struct nfs4_label l, *label;
  3428. label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
  3429. /* Protect against concurrent sillydeletes */
  3430. state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
  3431. nfs4_label_release_security(label);
  3432. if (IS_ERR(state))
  3433. return ERR_CAST(state);
  3434. return state->inode;
  3435. }
  3436. static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
  3437. {
  3438. if (ctx->state == NULL)
  3439. return;
  3440. if (is_sync)
  3441. nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx));
  3442. else
  3443. nfs4_close_state(ctx->state, _nfs4_ctx_to_openmode(ctx));
  3444. }
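/*
 * Attribute-word masks covering every attribute bit up to and
 * including MOUNTED_ON_FILEID (v4.0, word 1), SUPPATTR_EXCLCREAT
 * (v4.1, word 2) and XATTR_SUPPORT (v4.2, word 2); bits above these
 * are cleared from the server's answer below.
 */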
  3445. #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
  3446. #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
  3447. #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL)
  3448. static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
  3449. {
  3450. u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
  3451. struct nfs4_server_caps_arg args = {
  3452. .fhandle = fhandle,
  3453. .bitmask = bitmask,
  3454. };
  3455. struct nfs4_server_caps_res res = {};
  3456. struct rpc_message msg = {
  3457. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
  3458. .rpc_argp = &args,
  3459. .rpc_resp = &res,
  3460. };
  3461. int status;
  3462. int i;
  3463. bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
  3464. FATTR4_WORD0_FH_EXPIRE_TYPE |
  3465. FATTR4_WORD0_LINK_SUPPORT |
  3466. FATTR4_WORD0_SYMLINK_SUPPORT |
  3467. FATTR4_WORD0_ACLSUPPORT |
  3468. FATTR4_WORD0_CASE_INSENSITIVE |
  3469. FATTR4_WORD0_CASE_PRESERVING;
  3470. if (minorversion)
  3471. bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
  3472. status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
  3473. if (status == 0) {
  3474. /* Sanity check the server answers */
  3475. switch (minorversion) {
  3476. case 0:
  3477. res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
  3478. res.attr_bitmask[2] = 0;
  3479. break;
  3480. case 1:
  3481. res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
  3482. break;
  3483. case 2:
  3484. res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
  3485. }
  3486. memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
  3487. server->caps &= ~(NFS_CAP_ACLS | NFS_CAP_HARDLINKS |
3488. NFS_CAP_SYMLINKS | NFS_CAP_SECURITY_LABEL);
  3489. server->fattr_valid = NFS_ATTR_FATTR_V4;
  3490. if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
  3491. res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
  3492. server->caps |= NFS_CAP_ACLS;
  3493. if (res.has_links != 0)
  3494. server->caps |= NFS_CAP_HARDLINKS;
  3495. if (res.has_symlinks != 0)
  3496. server->caps |= NFS_CAP_SYMLINKS;
  3497. if (res.case_insensitive)
  3498. server->caps |= NFS_CAP_CASE_INSENSITIVE;
  3499. if (res.case_preserving)
  3500. server->caps |= NFS_CAP_CASE_PRESERVING;
  3501. #ifdef CONFIG_NFS_V4_SECURITY_LABEL
  3502. if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
  3503. server->caps |= NFS_CAP_SECURITY_LABEL;
  3504. #endif
  3505. if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
  3506. server->caps |= NFS_CAP_FS_LOCATIONS;
  3507. if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
  3508. server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
  3509. if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
  3510. server->fattr_valid &= ~NFS_ATTR_FATTR_MODE;
  3511. if (!(res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS))
  3512. server->fattr_valid &= ~NFS_ATTR_FATTR_NLINK;
  3513. if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER))
  3514. server->fattr_valid &= ~(NFS_ATTR_FATTR_OWNER |
  3515. NFS_ATTR_FATTR_OWNER_NAME);
  3516. if (!(res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP))
  3517. server->fattr_valid &= ~(NFS_ATTR_FATTR_GROUP |
  3518. NFS_ATTR_FATTR_GROUP_NAME);
  3519. if (!(res.attr_bitmask[1] & FATTR4_WORD1_SPACE_USED))
  3520. server->fattr_valid &= ~NFS_ATTR_FATTR_SPACE_USED;
  3521. if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS))
  3522. server->fattr_valid &= ~NFS_ATTR_FATTR_ATIME;
  3523. if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA))
  3524. server->fattr_valid &= ~NFS_ATTR_FATTR_CTIME;
  3525. if (!(res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY))
  3526. server->fattr_valid &= ~NFS_ATTR_FATTR_MTIME;
  3527. memcpy(server->attr_bitmask_nl, res.attr_bitmask,
  3528. sizeof(server->attr_bitmask));
  3529. server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
  3530. memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
  3531. server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
  3532. server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
  3533. server->cache_consistency_bitmask[2] = 0;
3534. /* Avoid a regression due to a buggy server */
  3535. for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
  3536. res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
  3537. memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
  3538. sizeof(server->exclcreat_bitmask));
  3539. server->acl_bitmask = res.acl_bitmask;
  3540. server->fh_expire_type = res.fh_expire_type;
  3541. }
  3542. return status;
  3543. }
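/*
 * Like most proc-level entry points in this file, nfs4_server_capabilities()
 * is a thin retry loop around its underscore-prefixed helper:
 * nfs4_handle_exception() inspects the error, runs any required state
 * recovery or delay, and sets exception.retry when the call should be
 * reissued. Illustrative sketch of the pattern (placeholder names, not a
 * function in this file):
 *
 *	struct nfs4_exception exception = { .interruptible = true, };
 *	int err;
 *	do {
 *		err = nfs4_handle_exception(server, _some_nfs4_op(server, ...),
 *					    &exception);
 *	} while (exception.retry);
 *	return err;
 */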
  3544. int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
  3545. {
  3546. struct nfs4_exception exception = {
  3547. .interruptible = true,
  3548. };
  3549. int err;
  3550. nfs4_server_set_init_caps(server);
  3551. do {
  3552. err = nfs4_handle_exception(server,
  3553. _nfs4_server_capabilities(server, fhandle),
  3554. &exception);
  3555. } while (exception.retry);
  3556. return err;
  3557. }
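/*
 * For each server name in an fs_locations entry, resolve the name to an
 * address and hand it to the RPC layer, which tests whether the new
 * transport reaches the same server instance (session trunking) and, if
 * so, adds it to cl_rpcclient.
 */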
  3558. static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
  3559. struct nfs_client *clp,
  3560. struct nfs_server *server)
  3561. {
  3562. int i;
  3563. for (i = 0; i < location->nservers; i++) {
  3564. struct nfs4_string *srv_loc = &location->servers[i];
  3565. struct sockaddr_storage addr;
  3566. size_t addrlen;
  3567. struct xprt_create xprt_args = {
  3568. .ident = 0,
  3569. .net = clp->cl_net,
  3570. };
  3571. struct nfs4_add_xprt_data xprtdata = {
  3572. .clp = clp,
  3573. };
  3574. struct rpc_add_xprt_test rpcdata = {
  3575. .add_xprt_test = clp->cl_mvops->session_trunk,
  3576. .data = &xprtdata,
  3577. };
  3578. char *servername = NULL;
  3579. if (!srv_loc->len)
  3580. continue;
  3581. addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
  3582. &addr, sizeof(addr),
  3583. clp->cl_net, server->port);
  3584. if (!addrlen)
  3585. return;
  3586. xprt_args.dstaddr = (struct sockaddr *)&addr;
  3587. xprt_args.addrlen = addrlen;
  3588. servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
  3589. if (!servername)
  3590. return;
  3591. memcpy(servername, srv_loc->data, srv_loc->len);
  3592. servername[srv_loc->len] = '\0';
  3593. xprt_args.servername = servername;
  3594. xprtdata.cred = nfs4_get_clid_cred(clp);
  3595. rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
  3596. rpc_clnt_setup_test_and_add_xprt,
  3597. &rpcdata);
  3598. if (xprtdata.cred)
  3599. put_cred(xprtdata.cred);
  3600. kfree(servername);
  3601. }
  3602. }
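/*
 * Fetch the fs_locations attribute for @fhandle, using the state-renewal
 * cred (or the client-ID cred as a fallback), and probe every returned
 * location for session trunking.
 */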
  3603. static int _nfs4_discover_trunking(struct nfs_server *server,
  3604. struct nfs_fh *fhandle)
  3605. {
  3606. struct nfs4_fs_locations *locations = NULL;
  3607. struct page *page;
  3608. const struct cred *cred;
  3609. struct nfs_client *clp = server->nfs_client;
  3610. const struct nfs4_state_maintenance_ops *ops =
  3611. clp->cl_mvops->state_renewal_ops;
  3612. int status = -ENOMEM, i;
  3613. cred = ops->get_state_renewal_cred(clp);
  3614. if (cred == NULL) {
  3615. cred = nfs4_get_clid_cred(clp);
  3616. if (cred == NULL)
  3617. return -ENOKEY;
  3618. }
  3619. page = alloc_page(GFP_KERNEL);
  3620. if (!page)
  3621. goto out_put_cred;
  3622. locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
  3623. if (!locations)
  3624. goto out_free;
  3625. locations->fattr = nfs_alloc_fattr();
  3626. if (!locations->fattr)
  3627. goto out_free_2;
  3628. status = nfs4_proc_get_locations(server, fhandle, locations, page,
  3629. cred);
  3630. if (status)
  3631. goto out_free_3;
  3632. for (i = 0; i < locations->nlocations; i++)
  3633. test_fs_location_for_trunking(&locations->locations[i], clp,
  3634. server);
  3635. out_free_3:
  3636. kfree(locations->fattr);
  3637. out_free_2:
  3638. kfree(locations);
  3639. out_free:
  3640. __free_page(page);
  3641. out_put_cred:
  3642. put_cred(cred);
  3643. return status;
  3644. }
  3645. static int nfs4_discover_trunking(struct nfs_server *server,
  3646. struct nfs_fh *fhandle)
  3647. {
  3648. struct nfs4_exception exception = {
  3649. .interruptible = true,
  3650. };
  3651. struct nfs_client *clp = server->nfs_client;
  3652. int err = 0;
  3653. if (!nfs4_has_session(clp))
  3654. goto out;
  3655. do {
  3656. err = nfs4_handle_exception(server,
  3657. _nfs4_discover_trunking(server, fhandle),
  3658. &exception);
  3659. } while (exception.retry);
  3660. out:
  3661. return err;
  3662. }
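/*
 * Look up the server's pseudo-filesystem root: a compound that sets the
 * root filehandle and returns its fh and attributes
 * (PUTROOTFH/GETFH/GETATTR).
 */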
  3663. static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
  3664. struct nfs_fsinfo *info)
  3665. {
  3666. u32 bitmask[3];
  3667. struct nfs4_lookup_root_arg args = {
  3668. .bitmask = bitmask,
  3669. };
  3670. struct nfs4_lookup_res res = {
  3671. .server = server,
  3672. .fattr = info->fattr,
  3673. .fh = fhandle,
  3674. };
  3675. struct rpc_message msg = {
  3676. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
  3677. .rpc_argp = &args,
  3678. .rpc_resp = &res,
  3679. };
  3680. bitmask[0] = nfs4_fattr_bitmap[0];
  3681. bitmask[1] = nfs4_fattr_bitmap[1];
3682. /*
3683. * The security label is fetched by the getattr that follows, so skip it here
3684. */
  3685. bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
  3686. nfs_fattr_init(info->fattr);
  3687. return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
  3688. }
  3689. static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
  3690. struct nfs_fsinfo *info)
  3691. {
  3692. struct nfs4_exception exception = {
  3693. .interruptible = true,
  3694. };
  3695. int err;
  3696. do {
  3697. err = _nfs4_lookup_root(server, fhandle, info);
  3698. trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
  3699. switch (err) {
  3700. case 0:
  3701. case -NFS4ERR_WRONGSEC:
  3702. goto out;
  3703. default:
  3704. err = nfs4_handle_exception(server, err, &exception);
  3705. }
  3706. } while (exception.retry);
  3707. out:
  3708. return err;
  3709. }
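/*
 * Retry the pseudoroot lookup with a specific security flavor: create an
 * rpc_auth for @flavor (attaching it to server->client) and repeat the
 * lookup using it.
 */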
  3710. static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
  3711. struct nfs_fsinfo *info, rpc_authflavor_t flavor)
  3712. {
  3713. struct rpc_auth_create_args auth_args = {
  3714. .pseudoflavor = flavor,
  3715. };
  3716. struct rpc_auth *auth;
  3717. auth = rpcauth_create(&auth_args, server->client);
  3718. if (IS_ERR(auth))
  3719. return -EACCES;
  3720. return nfs4_lookup_root(server, fhandle, info);
  3721. }
  3722. /*
  3723. * Retry pseudoroot lookup with various security flavors. We do this when:
  3724. *
  3725. * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
  3726. * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
  3727. *
  3728. * Returns zero on success, or a negative NFS4ERR value, or a
  3729. * negative errno value.
  3730. */
  3731. static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
  3732. struct nfs_fsinfo *info)
  3733. {
3734. /* Per RFC 3530bis, section 15.33.5 */
  3735. static const rpc_authflavor_t flav_array[] = {
  3736. RPC_AUTH_GSS_KRB5P,
  3737. RPC_AUTH_GSS_KRB5I,
  3738. RPC_AUTH_GSS_KRB5,
  3739. RPC_AUTH_UNIX, /* courtesy */
  3740. RPC_AUTH_NULL,
  3741. };
  3742. int status = -EPERM;
  3743. size_t i;
  3744. if (server->auth_info.flavor_len > 0) {
  3745. /* try each flavor specified by user */
  3746. for (i = 0; i < server->auth_info.flavor_len; i++) {
  3747. status = nfs4_lookup_root_sec(server, fhandle, info,
  3748. server->auth_info.flavors[i]);
  3749. if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
  3750. continue;
  3751. break;
  3752. }
  3753. } else {
  3754. /* no flavors specified by user, try default list */
  3755. for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
  3756. status = nfs4_lookup_root_sec(server, fhandle, info,
  3757. flav_array[i]);
  3758. if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
  3759. continue;
  3760. break;
  3761. }
  3762. }
  3763. /*
  3764. * -EACCES could mean that the user doesn't have correct permissions
  3765. * to access the mount. It could also mean that we tried to mount
  3766. * with a gss auth flavor, but rpc.gssd isn't running. Either way,
  3767. * existing mount programs don't handle -EACCES very well so it should
  3768. * be mapped to -EPERM instead.
  3769. */
  3770. if (status == -EACCES)
  3771. status = -EPERM;
  3772. return status;
  3773. }
  3774. /**
  3775. * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
  3776. * @server: initialized nfs_server handle
  3777. * @fhandle: we fill in the pseudo-fs root file handle
  3778. * @info: we fill in an FSINFO struct
  3779. * @auth_probe: probe the auth flavours
  3780. *
  3781. * Returns zero on success, or a negative errno.
  3782. */
  3783. int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
  3784. struct nfs_fsinfo *info,
  3785. bool auth_probe)
  3786. {
  3787. int status = 0;
  3788. if (!auth_probe)
  3789. status = nfs4_lookup_root(server, fhandle, info);
  3790. if (auth_probe || status == NFS4ERR_WRONGSEC)
  3791. status = server->nfs_client->cl_mvops->find_root_sec(server,
  3792. fhandle, info);
  3793. if (status == 0)
  3794. status = nfs4_server_capabilities(server, fhandle);
  3795. if (status == 0)
  3796. status = nfs4_do_fsinfo(server, fhandle, info);
  3797. return nfs4_map_errors(status);
  3798. }
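/*
 * Fetch the root of the mount: refresh the server's capabilities, get the
 * attributes of @mntfh, and adopt its fsid as the server's fsid if the
 * two differ.
 */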
  3799. static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
  3800. struct nfs_fsinfo *info)
  3801. {
  3802. int error;
  3803. struct nfs_fattr *fattr = info->fattr;
  3804. error = nfs4_server_capabilities(server, mntfh);
  3805. if (error < 0) {
  3806. dprintk("nfs4_get_root: getcaps error = %d\n", -error);
  3807. return error;
  3808. }
  3809. error = nfs4_proc_getattr(server, mntfh, fattr, NULL);
  3810. if (error < 0) {
  3811. dprintk("nfs4_get_root: getattr error = %d\n", -error);
  3812. goto out;
  3813. }
  3814. if (fattr->valid & NFS_ATTR_FATTR_FSID &&
  3815. !nfs_fsid_equal(&server->fsid, &fattr->fsid))
  3816. memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
  3817. out:
  3818. return error;
  3819. }
  3820. /*
  3821. * Get locations and (maybe) other attributes of a referral.
3822. * Note that we'll actually follow the referral later, when
3823. * we detect an fsid mismatch during inode revalidation
  3824. */
  3825. static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
  3826. const struct qstr *name, struct nfs_fattr *fattr,
  3827. struct nfs_fh *fhandle)
  3828. {
  3829. int status = -ENOMEM;
  3830. struct page *page = NULL;
  3831. struct nfs4_fs_locations *locations = NULL;
  3832. page = alloc_page(GFP_KERNEL);
  3833. if (page == NULL)
  3834. goto out;
  3835. locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
  3836. if (locations == NULL)
  3837. goto out;
  3838. locations->fattr = fattr;
  3839. status = nfs4_proc_fs_locations(client, dir, name, locations, page);
  3840. if (status != 0)
  3841. goto out;
  3842. /*
  3843. * If the fsid didn't change, this is a migration event, not a
  3844. * referral. Cause us to drop into the exception handler, which
  3845. * will kick off migration recovery.
  3846. */
  3847. if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &fattr->fsid)) {
  3848. dprintk("%s: server did not return a different fsid for"
  3849. " a referral at %s\n", __func__, name->name);
  3850. status = -NFS4ERR_MOVED;
  3851. goto out;
  3852. }
  3853. /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
  3854. nfs_fixup_referral_attributes(fattr);
  3855. memset(fhandle, 0, sizeof(struct nfs_fh));
  3856. out:
  3857. if (page)
  3858. __free_page(page);
  3859. kfree(locations);
  3860. return status;
  3861. }
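/*
 * GETATTR for @fhandle. The request bitmask is trimmed by
 * nfs4_bitmap_copy_adjust() so attributes whose cached values are still
 * trusted are not requested again; the RPC is made moveable when the
 * client has a session, and is allowed to time out (rather than hang) for
 * 'softreval' revalidations.
 */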
  3862. static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
  3863. struct nfs_fattr *fattr, struct inode *inode)
  3864. {
  3865. __u32 bitmask[NFS4_BITMASK_SZ];
  3866. struct nfs4_getattr_arg args = {
  3867. .fh = fhandle,
  3868. .bitmask = bitmask,
  3869. };
  3870. struct nfs4_getattr_res res = {
  3871. .fattr = fattr,
  3872. .server = server,
  3873. };
  3874. struct rpc_message msg = {
  3875. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
  3876. .rpc_argp = &args,
  3877. .rpc_resp = &res,
  3878. };
  3879. unsigned short task_flags = 0;
  3880. if (nfs4_has_session(server->nfs_client))
  3881. task_flags = RPC_TASK_MOVEABLE;
3882. /* Is this an attribute revalidation, subject to softreval? */
  3883. if (inode && (server->flags & NFS_MOUNT_SOFTREVAL))
  3884. task_flags |= RPC_TASK_TIMEOUT;
  3885. nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0);
  3886. nfs_fattr_init(fattr);
  3887. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
  3888. return nfs4_do_call_sync(server->client, server, &msg,
  3889. &args.seq_args, &res.seq_res, task_flags);
  3890. }
  3891. int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
  3892. struct nfs_fattr *fattr, struct inode *inode)
  3893. {
  3894. struct nfs4_exception exception = {
  3895. .interruptible = true,
  3896. };
  3897. int err;
  3898. do {
  3899. err = _nfs4_proc_getattr(server, fhandle, fattr, inode);
  3900. trace_nfs4_getattr(server, fhandle, fattr, err);
  3901. err = nfs4_handle_exception(server, err,
  3902. &exception);
  3903. } while (exception.retry);
  3904. return err;
  3905. }
  3906. /*
3907. * The file is not closed if it is opened due to a request to change
  3908. * the size of the file. The open call will not be needed once the
  3909. * VFS layer lookup-intents are implemented.
  3910. *
  3911. * Close is called when the inode is destroyed.
3912. * If we haven't opened the file for O_WRONLY, we
3913. * need to do so in the size_change case in order to obtain a stateid.
3914. *
3915. * Got a race?
3916. * Because OPEN is always done by name in NFSv4, it is
  3917. * possible that we opened a different file by the same
  3918. * name. We can recognize this race condition, but we
  3919. * can't do anything about it besides returning an error.
  3920. *
  3921. * This will be fixed with VFS changes (lookup-intent).
  3922. */
  3923. static int
  3924. nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
  3925. struct iattr *sattr)
  3926. {
  3927. struct inode *inode = d_inode(dentry);
  3928. const struct cred *cred = NULL;
  3929. struct nfs_open_context *ctx = NULL;
  3930. int status;
  3931. if (pnfs_ld_layoutret_on_setattr(inode) &&
  3932. sattr->ia_valid & ATTR_SIZE &&
  3933. sattr->ia_size < i_size_read(inode))
  3934. pnfs_commit_and_return_layout(inode);
  3935. nfs_fattr_init(fattr);
  3936. /* Deal with open(O_TRUNC) */
  3937. if (sattr->ia_valid & ATTR_OPEN)
  3938. sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
  3939. /* Optimization: if the end result is no change, don't RPC */
  3940. if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
  3941. return 0;
3942. /* Search for an existing file that is already open for writing */
  3943. if (sattr->ia_valid & ATTR_FILE) {
  3944. ctx = nfs_file_open_context(sattr->ia_file);
  3945. if (ctx)
  3946. cred = ctx->cred;
  3947. }
3948. /* Return any delegations if we're going to change the mode or ownership */
  3949. if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
  3950. nfs4_inode_make_writeable(inode);
  3951. status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL);
  3952. if (status == 0) {
  3953. nfs_setattr_update_inode(inode, sattr, fattr);
  3954. nfs_setsecurity(inode, fattr);
  3955. }
  3956. return status;
  3957. }
  3958. static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
  3959. struct dentry *dentry, struct nfs_fh *fhandle,
  3960. struct nfs_fattr *fattr)
  3961. {
  3962. struct nfs_server *server = NFS_SERVER(dir);
  3963. int status;
  3964. struct nfs4_lookup_arg args = {
  3965. .bitmask = server->attr_bitmask,
  3966. .dir_fh = NFS_FH(dir),
  3967. .name = &dentry->d_name,
  3968. };
  3969. struct nfs4_lookup_res res = {
  3970. .server = server,
  3971. .fattr = fattr,
  3972. .fh = fhandle,
  3973. };
  3974. struct rpc_message msg = {
  3975. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
  3976. .rpc_argp = &args,
  3977. .rpc_resp = &res,
  3978. };
  3979. unsigned short task_flags = 0;
  3980. if (nfs_server_capable(dir, NFS_CAP_MOVEABLE))
  3981. task_flags = RPC_TASK_MOVEABLE;
3982. /* Is this an attribute revalidation, subject to softreval? */
  3983. if (nfs_lookup_is_soft_revalidate(dentry))
  3984. task_flags |= RPC_TASK_TIMEOUT;
  3985. args.bitmask = nfs4_bitmask(server, fattr->label);
  3986. nfs_fattr_init(fattr);
  3987. dprintk("NFS call lookup %pd2\n", dentry);
  3988. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
  3989. status = nfs4_do_call_sync(clnt, server, &msg,
  3990. &args.seq_args, &res.seq_res, task_flags);
  3991. dprintk("NFS reply lookup: %d\n", status);
  3992. return status;
  3993. }
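/*
 * Used after a LOOKUP that had to negotiate a different security flavor:
 * synthesize just enough directory attributes (type, mode, nlink,
 * mountpoint flag) for the dentry to be instantiated as a mountpoint that
 * will be crossed later.
 */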
  3994. static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
  3995. {
  3996. fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
  3997. NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
  3998. fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
  3999. fattr->nlink = 2;
  4000. }
  4001. static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
  4002. struct dentry *dentry, struct nfs_fh *fhandle,
  4003. struct nfs_fattr *fattr)
  4004. {
  4005. struct nfs4_exception exception = {
  4006. .interruptible = true,
  4007. };
  4008. struct rpc_clnt *client = *clnt;
  4009. const struct qstr *name = &dentry->d_name;
  4010. int err;
  4011. do {
  4012. err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr);
  4013. trace_nfs4_lookup(dir, name, err);
  4014. switch (err) {
  4015. case -NFS4ERR_BADNAME:
  4016. err = -ENOENT;
  4017. goto out;
  4018. case -NFS4ERR_MOVED:
  4019. err = nfs4_get_referral(client, dir, name, fattr, fhandle);
  4020. if (err == -NFS4ERR_MOVED)
  4021. err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
  4022. goto out;
  4023. case -NFS4ERR_WRONGSEC:
  4024. err = -EPERM;
  4025. if (client != *clnt)
  4026. goto out;
  4027. client = nfs4_negotiate_security(client, dir, name);
  4028. if (IS_ERR(client))
  4029. return PTR_ERR(client);
  4030. exception.retry = 1;
  4031. break;
  4032. default:
  4033. err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
  4034. }
  4035. } while (exception.retry);
  4036. out:
  4037. if (err == 0)
  4038. *clnt = client;
  4039. else if (client != *clnt)
  4040. rpc_shutdown_client(client);
  4041. return err;
  4042. }
  4043. static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry,
  4044. struct nfs_fh *fhandle, struct nfs_fattr *fattr)
  4045. {
  4046. int status;
  4047. struct rpc_clnt *client = NFS_CLIENT(dir);
  4048. status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
  4049. if (client != NFS_CLIENT(dir)) {
  4050. rpc_shutdown_client(client);
  4051. nfs_fixup_secinfo_attributes(fattr);
  4052. }
  4053. return status;
  4054. }
  4055. struct rpc_clnt *
  4056. nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry,
  4057. struct nfs_fh *fhandle, struct nfs_fattr *fattr)
  4058. {
  4059. struct rpc_clnt *client = NFS_CLIENT(dir);
  4060. int status;
  4061. status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr);
  4062. if (status < 0)
  4063. return ERR_PTR(status);
  4064. return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
  4065. }
  4066. static int _nfs4_proc_lookupp(struct inode *inode,
  4067. struct nfs_fh *fhandle, struct nfs_fattr *fattr)
  4068. {
  4069. struct rpc_clnt *clnt = NFS_CLIENT(inode);
  4070. struct nfs_server *server = NFS_SERVER(inode);
  4071. int status;
  4072. struct nfs4_lookupp_arg args = {
  4073. .bitmask = server->attr_bitmask,
  4074. .fh = NFS_FH(inode),
  4075. };
  4076. struct nfs4_lookupp_res res = {
  4077. .server = server,
  4078. .fattr = fattr,
  4079. .fh = fhandle,
  4080. };
  4081. struct rpc_message msg = {
  4082. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
  4083. .rpc_argp = &args,
  4084. .rpc_resp = &res,
  4085. };
  4086. unsigned short task_flags = 0;
  4087. if (NFS_SERVER(inode)->flags & NFS_MOUNT_SOFTREVAL)
  4088. task_flags |= RPC_TASK_TIMEOUT;
  4089. args.bitmask = nfs4_bitmask(server, fattr->label);
  4090. nfs_fattr_init(fattr);
  4091. dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
  4092. status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
  4093. &res.seq_res, task_flags);
  4094. dprintk("NFS reply lookupp: %d\n", status);
  4095. return status;
  4096. }
  4097. static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
  4098. struct nfs_fattr *fattr)
  4099. {
  4100. struct nfs4_exception exception = {
  4101. .interruptible = true,
  4102. };
  4103. int err;
  4104. do {
  4105. err = _nfs4_proc_lookupp(inode, fhandle, fattr);
  4106. trace_nfs4_lookupp(inode, err);
  4107. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  4108. &exception);
  4109. } while (exception.retry);
  4110. return err;
  4111. }
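/*
 * ACCESS check for @inode against the rights in @entry->mask. Unless we
 * hold a read delegation, post-op attributes are requested as well (using
 * the cache consistency bitmask) so the inode can be refreshed from the
 * reply.
 */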
  4112. static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
  4113. const struct cred *cred)
  4114. {
  4115. struct nfs_server *server = NFS_SERVER(inode);
  4116. struct nfs4_accessargs args = {
  4117. .fh = NFS_FH(inode),
  4118. .access = entry->mask,
  4119. };
  4120. struct nfs4_accessres res = {
  4121. .server = server,
  4122. };
  4123. struct rpc_message msg = {
  4124. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
  4125. .rpc_argp = &args,
  4126. .rpc_resp = &res,
  4127. .rpc_cred = cred,
  4128. };
  4129. int status = 0;
  4130. if (!nfs4_have_delegation(inode, FMODE_READ)) {
  4131. res.fattr = nfs_alloc_fattr();
  4132. if (res.fattr == NULL)
  4133. return -ENOMEM;
  4134. args.bitmask = server->cache_consistency_bitmask;
  4135. }
  4136. status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
  4137. if (!status) {
  4138. nfs_access_set_mask(entry, res.access);
  4139. if (res.fattr)
  4140. nfs_refresh_inode(inode, res.fattr);
  4141. }
  4142. nfs_free_fattr(res.fattr);
  4143. return status;
  4144. }
  4145. static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
  4146. const struct cred *cred)
  4147. {
  4148. struct nfs4_exception exception = {
  4149. .interruptible = true,
  4150. };
  4151. int err;
  4152. do {
  4153. err = _nfs4_proc_access(inode, entry, cred);
  4154. trace_nfs4_access(inode, err);
  4155. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  4156. &exception);
  4157. } while (exception.retry);
  4158. return err;
  4159. }
  4160. /*
  4161. * TODO: For the time being, we don't try to get any attributes
  4162. * along with any of the zero-copy operations READ, READDIR,
  4163. * READLINK, WRITE.
  4164. *
  4165. * In the case of the first three, we want to put the GETATTR
  4166. * after the read-type operation -- this is because it is hard
  4167. * to predict the length of a GETATTR response in v4, and thus
  4168. * align the READ data correctly. This means that the GETATTR
  4169. * may end up partially falling into the page cache, and we should
  4170. * shift it into the 'tail' of the xdr_buf before processing.
  4171. * To do this efficiently, we need to know the total length
  4172. * of data received, which doesn't seem to be available outside
  4173. * of the RPC layer.
  4174. *
  4175. * In the case of WRITE, we also want to put the GETATTR after
  4176. * the operation -- in this case because we want to make sure
  4177. * we get the post-operation mtime and size.
  4178. *
  4179. * Both of these changes to the XDR layer would in fact be quite
  4180. * minor, but I decided to leave them for a subsequent patch.
  4181. */
  4182. static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
  4183. unsigned int pgbase, unsigned int pglen)
  4184. {
  4185. struct nfs4_readlink args = {
  4186. .fh = NFS_FH(inode),
  4187. .pgbase = pgbase,
  4188. .pglen = pglen,
  4189. .pages = &page,
  4190. };
  4191. struct nfs4_readlink_res res;
  4192. struct rpc_message msg = {
  4193. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
  4194. .rpc_argp = &args,
  4195. .rpc_resp = &res,
  4196. };
  4197. return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
  4198. }
  4199. static int nfs4_proc_readlink(struct inode *inode, struct page *page,
  4200. unsigned int pgbase, unsigned int pglen)
  4201. {
  4202. struct nfs4_exception exception = {
  4203. .interruptible = true,
  4204. };
  4205. int err;
  4206. do {
  4207. err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
  4208. trace_nfs4_readlink(inode, err);
  4209. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  4210. &exception);
  4211. } while (exception.retry);
  4212. return err;
  4213. }
  4214. /*
  4215. * This is just for mknod. open(O_CREAT) will always do ->open_context().
  4216. */
  4217. static int
  4218. nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
  4219. int flags)
  4220. {
  4221. struct nfs_server *server = NFS_SERVER(dir);
  4222. struct nfs4_label l, *ilabel;
  4223. struct nfs_open_context *ctx;
  4224. struct nfs4_state *state;
  4225. int status = 0;
  4226. ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
  4227. if (IS_ERR(ctx))
  4228. return PTR_ERR(ctx);
  4229. ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
  4230. if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
  4231. sattr->ia_mode &= ~current_umask();
  4232. state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
  4233. if (IS_ERR(state)) {
  4234. status = PTR_ERR(state);
  4235. goto out;
  4236. }
  4237. out:
  4238. nfs4_label_release_security(ilabel);
  4239. put_nfs_open_context(ctx);
  4240. return status;
  4241. }
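/*
 * REMOVE @name from @dir. On success the directory's change attribute is
 * updated from the reply, and the cached nlink of @dir is decremented when
 * a subdirectory was removed.
 */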
  4242. static int
  4243. _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype)
  4244. {
  4245. struct nfs_server *server = NFS_SERVER(dir);
  4246. struct nfs_removeargs args = {
  4247. .fh = NFS_FH(dir),
  4248. .name = *name,
  4249. };
  4250. struct nfs_removeres res = {
  4251. .server = server,
  4252. };
  4253. struct rpc_message msg = {
  4254. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
  4255. .rpc_argp = &args,
  4256. .rpc_resp = &res,
  4257. };
  4258. unsigned long timestamp = jiffies;
  4259. int status;
  4260. status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
  4261. if (status == 0) {
  4262. spin_lock(&dir->i_lock);
  4263. /* Removing a directory decrements nlink in the parent */
  4264. if (ftype == NF4DIR && dir->i_nlink > 2)
  4265. nfs4_dec_nlink_locked(dir);
  4266. nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp,
  4267. NFS_INO_INVALID_DATA);
  4268. spin_unlock(&dir->i_lock);
  4269. }
  4270. return status;
  4271. }
  4272. static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
  4273. {
  4274. struct nfs4_exception exception = {
  4275. .interruptible = true,
  4276. };
  4277. struct inode *inode = d_inode(dentry);
  4278. int err;
  4279. if (inode) {
  4280. if (inode->i_nlink == 1)
  4281. nfs4_inode_return_delegation(inode);
  4282. else
  4283. nfs4_inode_make_writeable(inode);
  4284. }
  4285. do {
  4286. err = _nfs4_proc_remove(dir, &dentry->d_name, NF4REG);
  4287. trace_nfs4_remove(dir, &dentry->d_name, err);
  4288. err = nfs4_handle_exception(NFS_SERVER(dir), err,
  4289. &exception);
  4290. } while (exception.retry);
  4291. return err;
  4292. }
  4293. static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
  4294. {
  4295. struct nfs4_exception exception = {
  4296. .interruptible = true,
  4297. };
  4298. int err;
  4299. do {
  4300. err = _nfs4_proc_remove(dir, name, NF4DIR);
  4301. trace_nfs4_remove(dir, name, err);
  4302. err = nfs4_handle_exception(NFS_SERVER(dir), err,
  4303. &exception);
  4304. } while (exception.retry);
  4305. return err;
  4306. }
  4307. static void nfs4_proc_unlink_setup(struct rpc_message *msg,
  4308. struct dentry *dentry,
  4309. struct inode *inode)
  4310. {
  4311. struct nfs_removeargs *args = msg->rpc_argp;
  4312. struct nfs_removeres *res = msg->rpc_resp;
  4313. res->server = NFS_SB(dentry->d_sb);
  4314. msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
  4315. nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0);
  4316. nfs_fattr_init(res->dir_attr);
  4317. if (inode) {
  4318. nfs4_inode_return_delegation(inode);
  4319. nfs_d_prune_case_insensitive_aliases(inode);
  4320. }
  4321. }
  4322. static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
  4323. {
  4324. nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
  4325. &data->args.seq_args,
  4326. &data->res.seq_res,
  4327. task);
  4328. }
  4329. static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
  4330. {
  4331. struct nfs_unlinkdata *data = task->tk_calldata;
  4332. struct nfs_removeres *res = &data->res;
  4333. if (!nfs4_sequence_done(task, &res->seq_res))
  4334. return 0;
  4335. if (nfs4_async_handle_error(task, res->server, NULL,
  4336. &data->timeout) == -EAGAIN)
  4337. return 0;
  4338. if (task->tk_status == 0)
  4339. nfs4_update_changeattr(dir, &res->cinfo,
  4340. res->dir_attr->time_start,
  4341. NFS_INO_INVALID_DATA);
  4342. return 1;
  4343. }
  4344. static void nfs4_proc_rename_setup(struct rpc_message *msg,
  4345. struct dentry *old_dentry,
  4346. struct dentry *new_dentry)
  4347. {
  4348. struct nfs_renameargs *arg = msg->rpc_argp;
  4349. struct nfs_renameres *res = msg->rpc_resp;
  4350. struct inode *old_inode = d_inode(old_dentry);
  4351. struct inode *new_inode = d_inode(new_dentry);
  4352. if (old_inode)
  4353. nfs4_inode_make_writeable(old_inode);
  4354. if (new_inode)
  4355. nfs4_inode_return_delegation(new_inode);
  4356. msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
  4357. res->server = NFS_SB(old_dentry->d_sb);
  4358. nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0);
  4359. }
  4360. static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
  4361. {
  4362. nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
  4363. &data->args.seq_args,
  4364. &data->res.seq_res,
  4365. task);
  4366. }
  4367. static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
  4368. struct inode *new_dir)
  4369. {
  4370. struct nfs_renamedata *data = task->tk_calldata;
  4371. struct nfs_renameres *res = &data->res;
  4372. if (!nfs4_sequence_done(task, &res->seq_res))
  4373. return 0;
  4374. if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
  4375. return 0;
  4376. if (task->tk_status == 0) {
  4377. nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
  4378. if (new_dir != old_dir) {
  4379. /* Note: If we moved a directory, nlink will change */
  4380. nfs4_update_changeattr(old_dir, &res->old_cinfo,
  4381. res->old_fattr->time_start,
  4382. NFS_INO_INVALID_NLINK |
  4383. NFS_INO_INVALID_DATA);
  4384. nfs4_update_changeattr(new_dir, &res->new_cinfo,
  4385. res->new_fattr->time_start,
  4386. NFS_INO_INVALID_NLINK |
  4387. NFS_INO_INVALID_DATA);
  4388. } else
  4389. nfs4_update_changeattr(old_dir, &res->old_cinfo,
  4390. res->old_fattr->time_start,
  4391. NFS_INO_INVALID_DATA);
  4392. }
  4393. return 1;
  4394. }
  4395. static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
  4396. {
  4397. struct nfs_server *server = NFS_SERVER(inode);
  4398. __u32 bitmask[NFS4_BITMASK_SZ];
  4399. struct nfs4_link_arg arg = {
  4400. .fh = NFS_FH(inode),
  4401. .dir_fh = NFS_FH(dir),
  4402. .name = name,
  4403. .bitmask = bitmask,
  4404. };
  4405. struct nfs4_link_res res = {
  4406. .server = server,
  4407. };
  4408. struct rpc_message msg = {
  4409. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
  4410. .rpc_argp = &arg,
  4411. .rpc_resp = &res,
  4412. };
  4413. int status = -ENOMEM;
  4414. res.fattr = nfs_alloc_fattr_with_label(server);
  4415. if (res.fattr == NULL)
  4416. goto out;
  4417. nfs4_inode_make_writeable(inode);
  4418. nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, res.fattr->label), inode,
  4419. NFS_INO_INVALID_CHANGE);
  4420. status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
  4421. if (!status) {
  4422. nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start,
  4423. NFS_INO_INVALID_DATA);
  4424. nfs4_inc_nlink(inode);
  4425. status = nfs_post_op_update_inode(inode, res.fattr);
  4426. if (!status)
  4427. nfs_setsecurity(inode, res.fattr);
  4428. }
  4429. out:
  4430. nfs_free_fattr(res.fattr);
  4431. return status;
  4432. }
  4433. static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
  4434. {
  4435. struct nfs4_exception exception = {
  4436. .interruptible = true,
  4437. };
  4438. int err;
  4439. do {
  4440. err = nfs4_handle_exception(NFS_SERVER(inode),
  4441. _nfs4_proc_link(inode, dir, name),
  4442. &exception);
  4443. } while (exception.retry);
  4444. return err;
  4445. }
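/*
 * One allocation carrying everything a CREATE-style operation needs: the
 * rpc_message, the argument and result structures, and the fh/fattr the
 * reply is decoded into. Shared by the mkdir, symlink and mknod paths.
 */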
  4446. struct nfs4_createdata {
  4447. struct rpc_message msg;
  4448. struct nfs4_create_arg arg;
  4449. struct nfs4_create_res res;
  4450. struct nfs_fh fh;
  4451. struct nfs_fattr fattr;
  4452. };
  4453. static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
  4454. const struct qstr *name, struct iattr *sattr, u32 ftype)
  4455. {
  4456. struct nfs4_createdata *data;
  4457. data = kzalloc(sizeof(*data), GFP_KERNEL);
  4458. if (data != NULL) {
  4459. struct nfs_server *server = NFS_SERVER(dir);
  4460. data->fattr.label = nfs4_label_alloc(server, GFP_KERNEL);
  4461. if (IS_ERR(data->fattr.label))
  4462. goto out_free;
  4463. data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
  4464. data->msg.rpc_argp = &data->arg;
  4465. data->msg.rpc_resp = &data->res;
  4466. data->arg.dir_fh = NFS_FH(dir);
  4467. data->arg.server = server;
  4468. data->arg.name = name;
  4469. data->arg.attrs = sattr;
  4470. data->arg.ftype = ftype;
  4471. data->arg.bitmask = nfs4_bitmask(server, data->fattr.label);
  4472. data->arg.umask = current_umask();
  4473. data->res.server = server;
  4474. data->res.fh = &data->fh;
  4475. data->res.fattr = &data->fattr;
  4476. nfs_fattr_init(data->res.fattr);
  4477. }
  4478. return data;
  4479. out_free:
  4480. kfree(data);
  4481. return NULL;
  4482. }
  4483. static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
  4484. {
  4485. int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
  4486. &data->arg.seq_args, &data->res.seq_res, 1);
  4487. if (status == 0) {
  4488. spin_lock(&dir->i_lock);
  4489. /* Creating a directory bumps nlink in the parent */
  4490. if (data->arg.ftype == NF4DIR)
  4491. nfs4_inc_nlink_locked(dir);
  4492. nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo,
  4493. data->res.fattr->time_start,
  4494. NFS_INO_INVALID_DATA);
  4495. spin_unlock(&dir->i_lock);
  4496. status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
  4497. }
  4498. return status;
  4499. }
  4500. static void nfs4_free_createdata(struct nfs4_createdata *data)
  4501. {
  4502. nfs4_label_free(data->fattr.label);
  4503. kfree(data);
  4504. }
  4505. static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
  4506. struct page *page, unsigned int len, struct iattr *sattr,
  4507. struct nfs4_label *label)
  4508. {
  4509. struct nfs4_createdata *data;
  4510. int status = -ENAMETOOLONG;
  4511. if (len > NFS4_MAXPATHLEN)
  4512. goto out;
  4513. status = -ENOMEM;
  4514. data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
  4515. if (data == NULL)
  4516. goto out;
  4517. data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
  4518. data->arg.u.symlink.pages = &page;
  4519. data->arg.u.symlink.len = len;
  4520. data->arg.label = label;
  4521. status = nfs4_do_create(dir, dentry, data);
  4522. nfs4_free_createdata(data);
  4523. out:
  4524. return status;
  4525. }
  4526. static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
  4527. struct page *page, unsigned int len, struct iattr *sattr)
  4528. {
  4529. struct nfs4_exception exception = {
  4530. .interruptible = true,
  4531. };
  4532. struct nfs4_label l, *label;
  4533. int err;
  4534. label = nfs4_label_init_security(dir, dentry, sattr, &l);
  4535. do {
  4536. err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
  4537. trace_nfs4_symlink(dir, &dentry->d_name, err);
  4538. err = nfs4_handle_exception(NFS_SERVER(dir), err,
  4539. &exception);
  4540. } while (exception.retry);
  4541. nfs4_label_release_security(label);
  4542. return err;
  4543. }
  4544. static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
  4545. struct iattr *sattr, struct nfs4_label *label)
  4546. {
  4547. struct nfs4_createdata *data;
  4548. int status = -ENOMEM;
  4549. data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
  4550. if (data == NULL)
  4551. goto out;
  4552. data->arg.label = label;
  4553. status = nfs4_do_create(dir, dentry, data);
  4554. nfs4_free_createdata(data);
  4555. out:
  4556. return status;
  4557. }
  4558. static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
  4559. struct iattr *sattr)
  4560. {
  4561. struct nfs_server *server = NFS_SERVER(dir);
  4562. struct nfs4_exception exception = {
  4563. .interruptible = true,
  4564. };
  4565. struct nfs4_label l, *label;
  4566. int err;
  4567. label = nfs4_label_init_security(dir, dentry, sattr, &l);
  4568. if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
  4569. sattr->ia_mode &= ~current_umask();
  4570. do {
  4571. err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
  4572. trace_nfs4_mkdir(dir, &dentry->d_name, err);
  4573. err = nfs4_handle_exception(NFS_SERVER(dir), err,
  4574. &exception);
  4575. } while (exception.retry);
  4576. nfs4_label_release_security(label);
  4577. return err;
  4578. }
  4579. static int _nfs4_proc_readdir(struct nfs_readdir_arg *nr_arg,
  4580. struct nfs_readdir_res *nr_res)
  4581. {
  4582. struct inode *dir = d_inode(nr_arg->dentry);
  4583. struct nfs_server *server = NFS_SERVER(dir);
  4584. struct nfs4_readdir_arg args = {
  4585. .fh = NFS_FH(dir),
  4586. .pages = nr_arg->pages,
  4587. .pgbase = 0,
  4588. .count = nr_arg->page_len,
  4589. .plus = nr_arg->plus,
  4590. };
  4591. struct nfs4_readdir_res res;
  4592. struct rpc_message msg = {
  4593. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
  4594. .rpc_argp = &args,
  4595. .rpc_resp = &res,
  4596. .rpc_cred = nr_arg->cred,
  4597. };
  4598. int status;
  4599. dprintk("%s: dentry = %pd2, cookie = %llu\n", __func__,
  4600. nr_arg->dentry, (unsigned long long)nr_arg->cookie);
  4601. if (!(server->caps & NFS_CAP_SECURITY_LABEL))
  4602. args.bitmask = server->attr_bitmask_nl;
  4603. else
  4604. args.bitmask = server->attr_bitmask;
  4605. nfs4_setup_readdir(nr_arg->cookie, nr_arg->verf, nr_arg->dentry, &args);
  4606. res.pgbase = args.pgbase;
  4607. status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
  4608. &res.seq_res, 0);
  4609. if (status >= 0) {
  4610. memcpy(nr_res->verf, res.verifier.data, NFS4_VERIFIER_SIZE);
  4611. status += args.pgbase;
  4612. }
  4613. nfs_invalidate_atime(dir);
  4614. dprintk("%s: returns %d\n", __func__, status);
  4615. return status;
  4616. }
  4617. static int nfs4_proc_readdir(struct nfs_readdir_arg *arg,
  4618. struct nfs_readdir_res *res)
  4619. {
  4620. struct nfs4_exception exception = {
  4621. .interruptible = true,
  4622. };
  4623. int err;
  4624. do {
  4625. err = _nfs4_proc_readdir(arg, res);
  4626. trace_nfs4_readdir(d_inode(arg->dentry), err);
  4627. err = nfs4_handle_exception(NFS_SERVER(d_inode(arg->dentry)),
  4628. err, &exception);
  4629. } while (exception.retry);
  4630. return err;
  4631. }
  4632. static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
  4633. struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
  4634. {
  4635. struct nfs4_createdata *data;
  4636. int mode = sattr->ia_mode;
  4637. int status = -ENOMEM;
  4638. data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
  4639. if (data == NULL)
  4640. goto out;
  4641. if (S_ISFIFO(mode))
  4642. data->arg.ftype = NF4FIFO;
  4643. else if (S_ISBLK(mode)) {
  4644. data->arg.ftype = NF4BLK;
  4645. data->arg.u.device.specdata1 = MAJOR(rdev);
  4646. data->arg.u.device.specdata2 = MINOR(rdev);
  4647. }
  4648. else if (S_ISCHR(mode)) {
  4649. data->arg.ftype = NF4CHR;
  4650. data->arg.u.device.specdata1 = MAJOR(rdev);
  4651. data->arg.u.device.specdata2 = MINOR(rdev);
  4652. } else if (!S_ISSOCK(mode)) {
  4653. status = -EINVAL;
  4654. goto out_free;
  4655. }
  4656. data->arg.label = label;
  4657. status = nfs4_do_create(dir, dentry, data);
  4658. out_free:
  4659. nfs4_free_createdata(data);
  4660. out:
  4661. return status;
  4662. }
  4663. static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
  4664. struct iattr *sattr, dev_t rdev)
  4665. {
  4666. struct nfs_server *server = NFS_SERVER(dir);
  4667. struct nfs4_exception exception = {
  4668. .interruptible = true,
  4669. };
  4670. struct nfs4_label l, *label;
  4671. int err;
  4672. label = nfs4_label_init_security(dir, dentry, sattr, &l);
  4673. if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
  4674. sattr->ia_mode &= ~current_umask();
  4675. do {
  4676. err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
  4677. trace_nfs4_mknod(dir, &dentry->d_name, err);
  4678. err = nfs4_handle_exception(NFS_SERVER(dir), err,
  4679. &exception);
  4680. } while (exception.retry);
  4681. nfs4_label_release_security(label);
  4682. return err;
  4683. }
  4684. static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
  4685. struct nfs_fsstat *fsstat)
  4686. {
  4687. struct nfs4_statfs_arg args = {
  4688. .fh = fhandle,
  4689. .bitmask = server->attr_bitmask,
  4690. };
  4691. struct nfs4_statfs_res res = {
  4692. .fsstat = fsstat,
  4693. };
  4694. struct rpc_message msg = {
  4695. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
  4696. .rpc_argp = &args,
  4697. .rpc_resp = &res,
  4698. };
  4699. nfs_fattr_init(fsstat->fattr);
  4700. return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
  4701. }
  4702. static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
  4703. {
  4704. struct nfs4_exception exception = {
  4705. .interruptible = true,
  4706. };
  4707. int err;
  4708. do {
  4709. err = nfs4_handle_exception(server,
  4710. _nfs4_proc_statfs(server, fhandle, fsstat),
  4711. &exception);
  4712. } while (exception.retry);
  4713. return err;
  4714. }
  4715. static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
  4716. struct nfs_fsinfo *fsinfo)
  4717. {
  4718. struct nfs4_fsinfo_arg args = {
  4719. .fh = fhandle,
  4720. .bitmask = server->attr_bitmask,
  4721. };
  4722. struct nfs4_fsinfo_res res = {
  4723. .fsinfo = fsinfo,
  4724. };
  4725. struct rpc_message msg = {
  4726. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
  4727. .rpc_argp = &args,
  4728. .rpc_resp = &res,
  4729. };
  4730. return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
  4731. }
  4732. static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
  4733. {
  4734. struct nfs4_exception exception = {
  4735. .interruptible = true,
  4736. };
  4737. int err;
  4738. do {
  4739. err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
  4740. trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
  4741. if (err == 0) {
  4742. nfs4_set_lease_period(server->nfs_client, fsinfo->lease_time * HZ);
  4743. break;
  4744. }
  4745. err = nfs4_handle_exception(server, err, &exception);
  4746. } while (exception.retry);
  4747. return err;
  4748. }
  4749. static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
  4750. {
  4751. int error;
  4752. nfs_fattr_init(fsinfo->fattr);
  4753. error = nfs4_do_fsinfo(server, fhandle, fsinfo);
  4754. if (error == 0) {
  4755. /* block layout checks this! */
  4756. server->pnfs_blksize = fsinfo->blksize;
  4757. set_pnfs_layoutdriver(server, fhandle, fsinfo);
  4758. }
  4759. return error;
  4760. }
  4761. static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
  4762. struct nfs_pathconf *pathconf)
  4763. {
  4764. struct nfs4_pathconf_arg args = {
  4765. .fh = fhandle,
  4766. .bitmask = server->attr_bitmask,
  4767. };
  4768. struct nfs4_pathconf_res res = {
  4769. .pathconf = pathconf,
  4770. };
  4771. struct rpc_message msg = {
  4772. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
  4773. .rpc_argp = &args,
  4774. .rpc_resp = &res,
  4775. };
  4776. /* None of the pathconf attributes are mandatory to implement */
  4777. if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
  4778. memset(pathconf, 0, sizeof(*pathconf));
  4779. return 0;
  4780. }
  4781. nfs_fattr_init(pathconf->fattr);
  4782. return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
  4783. }
  4784. static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
  4785. struct nfs_pathconf *pathconf)
  4786. {
  4787. struct nfs4_exception exception = {
  4788. .interruptible = true,
  4789. };
  4790. int err;
  4791. do {
  4792. err = nfs4_handle_exception(server,
  4793. _nfs4_proc_pathconf(server, fhandle, pathconf),
  4794. &exception);
  4795. } while (exception.retry);
  4796. return err;
  4797. }
  4798. int nfs4_set_rw_stateid(nfs4_stateid *stateid,
  4799. const struct nfs_open_context *ctx,
  4800. const struct nfs_lock_context *l_ctx,
  4801. fmode_t fmode)
  4802. {
  4803. return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
  4804. }
  4805. EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
  4806. static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
  4807. const struct nfs_open_context *ctx,
  4808. const struct nfs_lock_context *l_ctx,
  4809. fmode_t fmode)
  4810. {
  4811. nfs4_stateid _current_stateid;
  4812. /* If the current stateid represents a lost lock, then exit */
  4813. if (nfs4_set_rw_stateid(&_current_stateid, ctx, l_ctx, fmode) == -EIO)
  4814. return true;
  4815. return nfs4_stateid_match(stateid, &_current_stateid);
  4816. }
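/*
 * Errors indicating that the stateid used for an I/O request is no longer
 * usable; the callers below re-select a stateid and restart the RPC.
 */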
  4817. static bool nfs4_error_stateid_expired(int err)
  4818. {
  4819. switch (err) {
  4820. case -NFS4ERR_DELEG_REVOKED:
  4821. case -NFS4ERR_ADMIN_REVOKED:
  4822. case -NFS4ERR_BAD_STATEID:
  4823. case -NFS4ERR_STALE_STATEID:
  4824. case -NFS4ERR_OLD_STATEID:
  4825. case -NFS4ERR_OPENMODE:
  4826. case -NFS4ERR_EXPIRED:
  4827. return true;
  4828. }
  4829. return false;
  4830. }
  4831. static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
  4832. {
  4833. struct nfs_server *server = NFS_SERVER(hdr->inode);
  4834. trace_nfs4_read(hdr, task->tk_status);
  4835. if (task->tk_status < 0) {
  4836. struct nfs4_exception exception = {
  4837. .inode = hdr->inode,
  4838. .state = hdr->args.context->state,
  4839. .stateid = &hdr->args.stateid,
  4840. };
  4841. task->tk_status = nfs4_async_handle_exception(task,
  4842. server, task->tk_status, &exception);
  4843. if (exception.retry) {
  4844. rpc_restart_call_prepare(task);
  4845. return -EAGAIN;
  4846. }
  4847. }
  4848. if (task->tk_status > 0)
  4849. renew_lease(server, hdr->timestamp);
  4850. return 0;
  4851. }
  4852. static bool nfs4_read_stateid_changed(struct rpc_task *task,
  4853. struct nfs_pgio_args *args)
  4854. {
  4855. if (!nfs4_error_stateid_expired(task->tk_status) ||
  4856. nfs4_stateid_is_current(&args->stateid,
  4857. args->context,
  4858. args->lock_context,
  4859. FMODE_READ))
  4860. return false;
  4861. rpc_restart_call_prepare(task);
  4862. return true;
  4863. }
  4864. static bool nfs4_read_plus_not_supported(struct rpc_task *task,
  4865. struct nfs_pgio_header *hdr)
  4866. {
  4867. struct nfs_server *server = NFS_SERVER(hdr->inode);
  4868. struct rpc_message *msg = &task->tk_msg;
  4869. if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
  4870. server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
  4871. server->caps &= ~NFS_CAP_READ_PLUS;
  4872. msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
  4873. rpc_restart_call_prepare(task);
  4874. return true;
  4875. }
  4876. return false;
  4877. }
  4878. static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
  4879. {
  4880. if (!nfs4_sequence_done(task, &hdr->res.seq_res))
  4881. return -EAGAIN;
  4882. if (nfs4_read_stateid_changed(task, &hdr->args))
  4883. return -EAGAIN;
  4884. if (nfs4_read_plus_not_supported(task, hdr))
  4885. return -EAGAIN;
  4886. if (task->tk_status > 0)
  4887. nfs_invalidate_atime(hdr->inode);
  4888. return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
  4889. nfs4_read_done_cb(task, hdr);
  4890. }
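/*
 * READ_PLUS is attempted only when the server advertised it and the read
 * is not going through a pNFS data server. If the server then rejects the
 * call with -ENOTSUPP, nfs4_read_plus_not_supported() above clears
 * NFS_CAP_READ_PLUS and falls back to a plain READ.
 */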
  4891. #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
  4892. static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
  4893. struct rpc_message *msg)
  4894. {
  4895. /* Note: We don't use READ_PLUS with pNFS yet */
  4896. if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
  4897. msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
  4898. return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
  4899. }
  4900. return false;
  4901. }
  4902. #else
  4903. static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
  4904. struct rpc_message *msg)
  4905. {
  4906. return false;
  4907. }
4908. #endif /* CONFIG_NFS_V4_2 && CONFIG_NFS_V4_2_READ_PLUS */
  4909. static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
  4910. struct rpc_message *msg)
  4911. {
  4912. hdr->timestamp = jiffies;
  4913. if (!hdr->pgio_done_cb)
  4914. hdr->pgio_done_cb = nfs4_read_done_cb;
  4915. if (!nfs42_read_plus_support(hdr, msg))
  4916. msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
  4917. nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
  4918. }
  4919. static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
  4920. struct nfs_pgio_header *hdr)
  4921. {
  4922. if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
  4923. &hdr->args.seq_args,
  4924. &hdr->res.seq_res,
  4925. task))
  4926. return 0;
  4927. if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
  4928. hdr->args.lock_context,
  4929. hdr->rw_mode) == -EIO)
  4930. return -EIO;
  4931. if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
  4932. return -EIO;
  4933. return 0;
  4934. }
  4935. static int nfs4_write_done_cb(struct rpc_task *task,
  4936. struct nfs_pgio_header *hdr)
  4937. {
  4938. struct inode *inode = hdr->inode;
  4939. trace_nfs4_write(hdr, task->tk_status);
  4940. if (task->tk_status < 0) {
  4941. struct nfs4_exception exception = {
  4942. .inode = hdr->inode,
  4943. .state = hdr->args.context->state,
  4944. .stateid = &hdr->args.stateid,
  4945. };
  4946. task->tk_status = nfs4_async_handle_exception(task,
  4947. NFS_SERVER(inode), task->tk_status,
  4948. &exception);
  4949. if (exception.retry) {
  4950. rpc_restart_call_prepare(task);
  4951. return -EAGAIN;
  4952. }
  4953. }
  4954. if (task->tk_status >= 0) {
  4955. renew_lease(NFS_SERVER(inode), hdr->timestamp);
  4956. nfs_writeback_update_inode(hdr);
  4957. }
  4958. return 0;
  4959. }
  4960. static bool nfs4_write_stateid_changed(struct rpc_task *task,
  4961. struct nfs_pgio_args *args)
  4962. {
  4963. if (!nfs4_error_stateid_expired(task->tk_status) ||
  4964. nfs4_stateid_is_current(&args->stateid,
  4965. args->context,
  4966. args->lock_context,
  4967. FMODE_WRITE))
  4968. return false;
  4969. rpc_restart_call_prepare(task);
  4970. return true;
  4971. }
  4972. static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
  4973. {
  4974. if (!nfs4_sequence_done(task, &hdr->res.seq_res))
  4975. return -EAGAIN;
  4976. if (nfs4_write_stateid_changed(task, &hdr->args))
  4977. return -EAGAIN;
  4978. return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
  4979. nfs4_write_done_cb(task, hdr);
  4980. }
  4981. static
  4982. bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
  4983. {
  4984. /* Don't request attributes for pNFS or O_DIRECT writes */
  4985. if (hdr->ds_clp != NULL || hdr->dreq != NULL)
  4986. return false;
  4987. /* Otherwise, request attributes if and only if we don't hold
  4988. * a delegation
  4989. */
  4990. return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
  4991. }
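/*
 * Build a GETATTR bitmask requesting only the attributes whose cached
 * copies are marked invalid (plus whatever @cache_validity forces), and
 * intersect it with the attributes this server actually supports.
 */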
void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
		      struct inode *inode, unsigned long cache_validity)
{
	struct nfs_server *server = NFS_SERVER(inode);
	unsigned int i;

	memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
	cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);

	if (cache_validity & NFS_INO_INVALID_CHANGE)
		bitmask[0] |= FATTR4_WORD0_CHANGE;
	if (cache_validity & NFS_INO_INVALID_ATIME)
		bitmask[1] |= FATTR4_WORD1_TIME_ACCESS;
	if (cache_validity & NFS_INO_INVALID_MODE)
		bitmask[1] |= FATTR4_WORD1_MODE;
	if (cache_validity & NFS_INO_INVALID_OTHER)
		bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
	if (cache_validity & NFS_INO_INVALID_NLINK)
		bitmask[1] |= FATTR4_WORD1_NUMLINKS;
	if (cache_validity & NFS_INO_INVALID_CTIME)
		bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
	if (cache_validity & NFS_INO_INVALID_MTIME)
		bitmask[1] |= FATTR4_WORD1_TIME_MODIFY;
	if (cache_validity & NFS_INO_INVALID_BLOCKS)
		bitmask[1] |= FATTR4_WORD1_SPACE_USED;
	if (cache_validity & NFS_INO_INVALID_SIZE)
		bitmask[0] |= FATTR4_WORD0_SIZE;

	for (i = 0; i < NFS4_BITMASK_SZ; i++)
		bitmask[i] &= server->attr_bitmask[i];
}

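/*
 * Prepare the RPC message for a WRITE.  Cache-consistency attributes are
 * requested only when neither pNFS nor O_DIRECT is in use and we do not
 * hold a delegation.
 */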
static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
				  struct rpc_message *msg,
				  struct rpc_clnt **clnt)
{
	struct nfs_server *server = NFS_SERVER(hdr->inode);

	if (!nfs4_write_need_cache_consistency_data(hdr)) {
		hdr->args.bitmask = NULL;
		hdr->res.fattr = NULL;
	} else {
		nfs4_bitmask_set(hdr->args.bitmask_store,
				 server->cache_consistency_bitmask,
				 hdr->inode, NFS_INO_INVALID_BLOCKS);
		hdr->args.bitmask = hdr->args.bitmask_store;
	}

	if (!hdr->pgio_done_cb)
		hdr->pgio_done_cb = nfs4_write_done_cb;
	hdr->res.server = server;
	hdr->timestamp = jiffies;

	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
	nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
}

static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
{
	nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
			&data->args.seq_args,
			&data->res.seq_res,
			task);
}

static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
{
	struct inode *inode = data->inode;

	trace_nfs4_commit(data, task->tk_status);
	if (nfs4_async_handle_error(task, NFS_SERVER(inode),
				    NULL, NULL) == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}
	return 0;
}

static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
{
	if (!nfs4_sequence_done(task, &data->res.seq_res))
		return -EAGAIN;
	return data->commit_done_cb(task, data);
}

static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg,
				   struct rpc_clnt **clnt)
{
	struct nfs_server *server = NFS_SERVER(data->inode);

	if (data->commit_done_cb == NULL)
		data->commit_done_cb = nfs4_commit_done_cb;
	data->res.server = server;
	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
	nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
	nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
			   NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
}

static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
			     struct nfs_commitres *res)
{
	struct inode *dst_inode = file_inode(dst);
	struct nfs_server *server = NFS_SERVER(dst_inode);
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
		.rpc_argp = args,
		.rpc_resp = res,
	};

	args->fh = NFS_FH(dst_inode);
	return nfs4_call_sync(server->client, server, &msg,
			      &args->seq_args, &res->seq_res, 1);
}

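/*
 * Synchronously COMMIT @count bytes at @offset of @dst, retrying on
 * recoverable NFSv4 errors.
 */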
int nfs4_proc_commit(struct file *dst, __u64 offset, __u32 count, struct nfs_commitres *res)
{
	struct nfs_commitargs args = {
		.offset = offset,
		.count = count,
	};
	struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
	struct nfs4_exception exception = { };
	int status;

	do {
		status = _nfs4_proc_commit(dst, &args, res);
		status = nfs4_handle_exception(dst_server, status, &exception);
	} while (exception.retry);
	return status;
}

struct nfs4_renewdata {
	struct nfs_client	*client;
	unsigned long		timestamp;
};

/*
 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
 * standalone procedure for queueing an asynchronous RENEW.
 */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	if (refcount_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}

static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	trace_nfs4_renew_async(clp, task->tk_status);
	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		break;
	default:
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	do_renew_lease(clp, timestamp);
}

static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};

static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	struct nfs4_renewdata *data;

	if (renew_flags == 0)
		return 0;
	if (!refcount_inc_not_zero(&clp->cl_count))
		return -EIO;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL) {
		nfs_put_client(clp);
		return -ENOMEM;
	}
	data->client = clp;
	data->timestamp = jiffies;
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
			&nfs4_renew_ops, data);
}

static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	unsigned long now = jiffies;
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}

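/*
 * Check whether the server advertises support for the requested ACL type
 * (plain NFSv4 ACLs, or the dacl/sacl attributes) in its supported
 * attribute bitmask.
 */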
static bool nfs4_server_supports_acls(const struct nfs_server *server,
				      enum nfs4_acl_type type)
{
	switch (type) {
	default:
		return server->attr_bitmask[0] & FATTR4_WORD0_ACL;
	case NFS4ACL_DACL:
		return server->attr_bitmask[1] & FATTR4_WORD1_DACL;
	case NFS4ACL_SACL:
		return server->attr_bitmask[1] & FATTR4_WORD1_SACL;
	}
}

/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
 * the stack.
 */
#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)

int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
			     struct page **pages)
{
	struct page *newpage, **spages;
	int rc = 0;
	size_t len;

	spages = pages;

	do {
		len = min_t(size_t, PAGE_SIZE, buflen);
		newpage = alloc_page(GFP_KERNEL);

		if (newpage == NULL)
			goto unwind;
		memcpy(page_address(newpage), buf, len);
		buf += len;
		buflen -= len;
		*pages++ = newpage;
		rc++;
	} while (buflen != 0);

	return rc;

unwind:
	for (; rc > 0; rc--)
		__free_page(spages[rc - 1]);
	return -ENOMEM;
}

struct nfs4_cached_acl {
	enum nfs4_acl_type type;
	int cached;
	size_t len;
	char data[];
};

static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	kfree(nfsi->nfs4_acl);
	nfsi->nfs4_acl = acl;
	spin_unlock(&inode->i_lock);
}

static void nfs4_zap_acl_attr(struct inode *inode)
{
	nfs4_set_cached_acl(inode, NULL);
}

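/*
 * Serve a getxattr request from the cached ACL, if possible.  Returns the
 * ACL length on success, -ENOENT if nothing usable is cached, or -ERANGE
 * if the caller's buffer is too small.
 */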
static ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf,
				    size_t buflen, enum nfs4_acl_type type)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_cached_acl *acl;
	int ret = -ENOENT;

	spin_lock(&inode->i_lock);
	acl = nfsi->nfs4_acl;
	if (acl == NULL)
		goto out;
	if (acl->type != type)
		goto out;
	if (buf == NULL) /* user is just asking for length */
		goto out_len;
	if (acl->cached == 0)
		goto out;
	ret = -ERANGE; /* see getxattr(2) man page */
	if (acl->len > buflen)
		goto out;
	memcpy(buf, acl->data, acl->len);
out_len:
	ret = acl->len;
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static void nfs4_write_cached_acl(struct inode *inode, struct page **pages,
				  size_t pgbase, size_t acl_len,
				  enum nfs4_acl_type type)
{
	struct nfs4_cached_acl *acl;
	size_t buflen = sizeof(*acl) + acl_len;

	if (buflen <= PAGE_SIZE) {
		acl = kmalloc(buflen, GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 1;
		_copy_from_pages(acl->data, pages, pgbase, acl_len);
	} else {
		acl = kmalloc(sizeof(*acl), GFP_KERNEL);
		if (acl == NULL)
			goto out;
		acl->cached = 0;
	}
	acl->type = type;
	acl->len = acl_len;
out:
	nfs4_set_cached_acl(inode, acl);
}

/*
 * The getxattr API returns the required buffer length when called with a
 * NULL buf.  The NFSv4 acl tool then calls getxattr again after allocating
 * the required buf.  On a NULL buf, we send a page of data to the server
 * guessing that the ACL request can be serviced by a page.  If so, we cache
 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
 * the cache.  If not, we throw away the page, and cache the required
 * length.  The next getxattr call will then produce another round trip to
 * the server, this time with the input buf of the required size.
 */
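/*
 * For illustration only (hypothetical userspace caller), the two-step probe
 * described above typically looks like:
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);
 *
 * The second call can often be answered from the cache populated by the
 * first round trip below.
 */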
static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf,
				       size_t buflen, enum nfs4_acl_type type)
{
	struct page **pages;
	struct nfs_getaclargs args = {
		.fh = NFS_FH(inode),
		.acl_type = type,
		.acl_len = buflen,
	};
	struct nfs_getaclres res = {
		.acl_type = type,
		.acl_len = buflen,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
		.rpc_argp = &args,
		.rpc_resp = &res,
	};
	unsigned int npages;
	int ret = -ENOMEM, i;
	struct nfs_server *server = NFS_SERVER(inode);

	if (buflen == 0)
		buflen = server->rsize;

	npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
	pages = kmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	args.acl_pages = pages;

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out_free;
	}

	/* for decoding across pages */
	res.acl_scratch = alloc_page(GFP_KERNEL);
	if (!res.acl_scratch)
		goto out_free;

	args.acl_len = npages * PAGE_SIZE;

	dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
		__func__, buf, buflen, npages, args.acl_len);
	ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
			     &msg, &args.seq_args, &res.seq_res, 0);
	if (ret)
		goto out_free;

	/* Handle the case where the passed-in buffer is too short */
	if (res.acl_flags & NFS4_ACL_TRUNC) {
		/* Did the user only issue a request for the acl length? */
		if (buf == NULL)
			goto out_ok;
		ret = -ERANGE;
		goto out_free;
	}
	nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len,
			      type);
	if (buf) {
		if (res.acl_len > buflen) {
			ret = -ERANGE;
			goto out_free;
		}
		_copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
	}
out_ok:
	ret = res.acl_len;
out_free:
	while (--i >= 0)
		__free_page(pages[i]);
	if (res.acl_scratch)
		__free_page(res.acl_scratch);
	kfree(pages);
	return ret;
}

static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf,
				     size_t buflen, enum nfs4_acl_type type)
{
	struct nfs4_exception exception = {
		.interruptible = true,
	};
	ssize_t ret;

	do {
		ret = __nfs4_get_acl_uncached(inode, buf, buflen, type);
		trace_nfs4_get_acl(inode, ret);
		if (ret >= 0)
			break;
		ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
	} while (exception.retry);
	return ret;
}

static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen,
				 enum nfs4_acl_type type)
{
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	if (!nfs4_server_supports_acls(server, type))
		return -EOPNOTSUPP;
	ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
	if (ret < 0)
		return ret;
	if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
		nfs_zap_acl_cache(inode);
	ret = nfs4_read_cached_acl(inode, buf, buflen, type);
	if (ret != -ENOENT)
		/* -ENOENT is returned if there is no ACL or if there is an ACL
		 * but no cached acl data, just the acl length */
		return ret;
	return nfs4_get_acl_uncached(inode, buf, buflen, type);
}

  5413. static int __nfs4_proc_set_acl(struct inode *inode, const void *buf,
  5414. size_t buflen, enum nfs4_acl_type type)
  5415. {
  5416. struct nfs_server *server = NFS_SERVER(inode);
  5417. struct page *pages[NFS4ACL_MAXPAGES];
  5418. struct nfs_setaclargs arg = {
  5419. .fh = NFS_FH(inode),
  5420. .acl_type = type,
  5421. .acl_len = buflen,
  5422. .acl_pages = pages,
  5423. };
  5424. struct nfs_setaclres res;
  5425. struct rpc_message msg = {
  5426. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
  5427. .rpc_argp = &arg,
  5428. .rpc_resp = &res,
  5429. };
  5430. unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
  5431. int ret, i;
  5432. /* You can't remove system.nfs4_acl: */
  5433. if (buflen == 0)
  5434. return -EINVAL;
  5435. if (!nfs4_server_supports_acls(server, type))
  5436. return -EOPNOTSUPP;
  5437. if (npages > ARRAY_SIZE(pages))
  5438. return -ERANGE;
  5439. i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages);
  5440. if (i < 0)
  5441. return i;
  5442. nfs4_inode_make_writeable(inode);
  5443. ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
  5444. /*
  5445. * Free each page after tx, so the only ref left is
  5446. * held by the network stack
  5447. */
  5448. for (; i > 0; i--)
  5449. put_page(pages[i-1]);
  5450. /*
  5451. * Acl update can result in inode attribute update.
  5452. * so mark the attribute cache invalid.
  5453. */
  5454. spin_lock(&inode->i_lock);
  5455. nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
  5456. NFS_INO_INVALID_CTIME |
  5457. NFS_INO_REVAL_FORCED);
  5458. spin_unlock(&inode->i_lock);
  5459. nfs_access_zap_cache(inode);
  5460. nfs_zap_acl_cache(inode);
  5461. return ret;
  5462. }
  5463. static int nfs4_proc_set_acl(struct inode *inode, const void *buf,
  5464. size_t buflen, enum nfs4_acl_type type)
  5465. {
  5466. struct nfs4_exception exception = { };
  5467. int err;
  5468. do {
  5469. err = __nfs4_proc_set_acl(inode, buf, buflen, type);
  5470. trace_nfs4_set_acl(inode, err);
  5471. if (err == -NFS4ERR_BADOWNER || err == -NFS4ERR_BADNAME) {
  5472. /*
  5473. * no need to retry since the kernel
  5474. * isn't involved in encoding the ACEs.
  5475. */
  5476. err = -EINVAL;
  5477. break;
  5478. }
  5479. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  5480. &exception);
  5481. } while (exception.retry);
  5482. return err;
  5483. }
  5484. #ifdef CONFIG_NFS_V4_SECURITY_LABEL
  5485. static int _nfs4_get_security_label(struct inode *inode, void *buf,
  5486. size_t buflen)
  5487. {
  5488. struct nfs_server *server = NFS_SERVER(inode);
  5489. struct nfs4_label label = {0, 0, buflen, buf};
  5490. u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
  5491. struct nfs_fattr fattr = {
  5492. .label = &label,
  5493. };
  5494. struct nfs4_getattr_arg arg = {
  5495. .fh = NFS_FH(inode),
  5496. .bitmask = bitmask,
  5497. };
  5498. struct nfs4_getattr_res res = {
  5499. .fattr = &fattr,
  5500. .server = server,
  5501. };
  5502. struct rpc_message msg = {
  5503. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
  5504. .rpc_argp = &arg,
  5505. .rpc_resp = &res,
  5506. };
  5507. int ret;
  5508. nfs_fattr_init(&fattr);
  5509. ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
  5510. if (ret)
  5511. return ret;
  5512. if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
  5513. return -ENOENT;
  5514. return label.len;
  5515. }
  5516. static int nfs4_get_security_label(struct inode *inode, void *buf,
  5517. size_t buflen)
  5518. {
  5519. struct nfs4_exception exception = {
  5520. .interruptible = true,
  5521. };
  5522. int err;
  5523. if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
  5524. return -EOPNOTSUPP;
  5525. do {
  5526. err = _nfs4_get_security_label(inode, buf, buflen);
  5527. trace_nfs4_get_security_label(inode, err);
  5528. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  5529. &exception);
  5530. } while (exception.retry);
  5531. return err;
  5532. }
  5533. static int _nfs4_do_set_security_label(struct inode *inode,
  5534. struct nfs4_label *ilabel,
  5535. struct nfs_fattr *fattr)
  5536. {
  5537. struct iattr sattr = {0};
  5538. struct nfs_server *server = NFS_SERVER(inode);
  5539. const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
  5540. struct nfs_setattrargs arg = {
  5541. .fh = NFS_FH(inode),
  5542. .iap = &sattr,
  5543. .server = server,
  5544. .bitmask = bitmask,
  5545. .label = ilabel,
  5546. };
  5547. struct nfs_setattrres res = {
  5548. .fattr = fattr,
  5549. .server = server,
  5550. };
  5551. struct rpc_message msg = {
  5552. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
  5553. .rpc_argp = &arg,
  5554. .rpc_resp = &res,
  5555. };
  5556. int status;
  5557. nfs4_stateid_copy(&arg.stateid, &zero_stateid);
  5558. status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
  5559. if (status)
  5560. dprintk("%s failed: %d\n", __func__, status);
  5561. return status;
  5562. }
  5563. static int nfs4_do_set_security_label(struct inode *inode,
  5564. struct nfs4_label *ilabel,
  5565. struct nfs_fattr *fattr)
  5566. {
  5567. struct nfs4_exception exception = { };
  5568. int err;
  5569. do {
  5570. err = _nfs4_do_set_security_label(inode, ilabel, fattr);
  5571. trace_nfs4_set_security_label(inode, err);
  5572. err = nfs4_handle_exception(NFS_SERVER(inode), err,
  5573. &exception);
  5574. } while (exception.retry);
  5575. return err;
  5576. }
  5577. static int
  5578. nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
  5579. {
  5580. struct nfs4_label ilabel = {0, 0, buflen, (char *)buf };
  5581. struct nfs_fattr *fattr;
  5582. int status;
  5583. if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
  5584. return -EOPNOTSUPP;
  5585. fattr = nfs_alloc_fattr_with_label(NFS_SERVER(inode));
  5586. if (fattr == NULL)
  5587. return -ENOMEM;
  5588. status = nfs4_do_set_security_label(inode, &ilabel, fattr);
  5589. if (status == 0)
  5590. nfs_setsecurity(inode, fattr);
  5591. return status;
  5592. }
  5593. #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
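/*
 * Generate the boot verifier presented to the server.  It is normally
 * derived from this network namespace's boot time; when NFS4CLNT_PURGE_STATE
 * is set an impossible timestamp is used instead, so the verifier can never
 * match a normally generated one.
 */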
static void nfs4_init_boot_verifier(const struct nfs_client *clp,
				    nfs4_verifier *bootverf)
{
	__be32 verf[2];

	if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
		/* An impossible timestamp guarantees this value
		 * will never match a generated boot time. */
		verf[0] = cpu_to_be32(U32_MAX);
		verf[1] = cpu_to_be32(U32_MAX);
	} else {
		struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
		u64 ns = ktime_to_ns(nn->boot_time);

		verf[0] = cpu_to_be32(ns >> 32);
		verf[1] = cpu_to_be32(ns);
	}
	memcpy(bootverf->data, verf, sizeof(bootverf->data));
}

static size_t
nfs4_get_uniquifier(struct nfs_client *clp, char *buf, size_t buflen)
{
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
	struct nfs_netns_client *nn_clp = nn->nfs_client;
	const char *id;

	buf[0] = '\0';

	if (nn_clp) {
		rcu_read_lock();
		id = rcu_dereference(nn_clp->identifier);
		if (id)
			strscpy(buf, id, buflen);
		rcu_read_unlock();
	}

	if (nfs4_client_id_uniquifier[0] != '\0' && buf[0] == '\0')
		strscpy(buf, nfs4_client_id_uniquifier, buflen);

	return strlen(buf);
}

  5629. static int
  5630. nfs4_init_nonuniform_client_string(struct nfs_client *clp)
  5631. {
  5632. char buf[NFS4_CLIENT_ID_UNIQ_LEN];
  5633. size_t buflen;
  5634. size_t len;
  5635. char *str;
  5636. if (clp->cl_owner_id != NULL)
  5637. return 0;
  5638. rcu_read_lock();
  5639. len = 14 +
  5640. strlen(clp->cl_rpcclient->cl_nodename) +
  5641. 1 +
  5642. strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
  5643. 1;
  5644. rcu_read_unlock();
  5645. buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
  5646. if (buflen)
  5647. len += buflen + 1;
  5648. if (len > NFS4_OPAQUE_LIMIT + 1)
  5649. return -EINVAL;
  5650. /*
  5651. * Since this string is allocated at mount time, and held until the
  5652. * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
  5653. * about a memory-reclaim deadlock.
  5654. */
  5655. str = kmalloc(len, GFP_KERNEL);
  5656. if (!str)
  5657. return -ENOMEM;
  5658. rcu_read_lock();
  5659. if (buflen)
  5660. scnprintf(str, len, "Linux NFSv4.0 %s/%s/%s",
  5661. clp->cl_rpcclient->cl_nodename, buf,
  5662. rpc_peeraddr2str(clp->cl_rpcclient,
  5663. RPC_DISPLAY_ADDR));
  5664. else
  5665. scnprintf(str, len, "Linux NFSv4.0 %s/%s",
  5666. clp->cl_rpcclient->cl_nodename,
  5667. rpc_peeraddr2str(clp->cl_rpcclient,
  5668. RPC_DISPLAY_ADDR));
  5669. rcu_read_unlock();
  5670. clp->cl_owner_id = str;
  5671. return 0;
  5672. }
  5673. static int
  5674. nfs4_init_uniform_client_string(struct nfs_client *clp)
  5675. {
  5676. char buf[NFS4_CLIENT_ID_UNIQ_LEN];
  5677. size_t buflen;
  5678. size_t len;
  5679. char *str;
  5680. if (clp->cl_owner_id != NULL)
  5681. return 0;
  5682. len = 10 + 10 + 1 + 10 + 1 +
  5683. strlen(clp->cl_rpcclient->cl_nodename) + 1;
  5684. buflen = nfs4_get_uniquifier(clp, buf, sizeof(buf));
  5685. if (buflen)
  5686. len += buflen + 1;
  5687. if (len > NFS4_OPAQUE_LIMIT + 1)
  5688. return -EINVAL;
  5689. /*
  5690. * Since this string is allocated at mount time, and held until the
  5691. * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
  5692. * about a memory-reclaim deadlock.
  5693. */
  5694. str = kmalloc(len, GFP_KERNEL);
  5695. if (!str)
  5696. return -ENOMEM;
  5697. if (buflen)
  5698. scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
  5699. clp->rpc_ops->version, clp->cl_minorversion,
  5700. buf, clp->cl_rpcclient->cl_nodename);
  5701. else
  5702. scnprintf(str, len, "Linux NFSv%u.%u %s",
  5703. clp->rpc_ops->version, clp->cl_minorversion,
  5704. clp->cl_rpcclient->cl_nodename);
  5705. clp->cl_owner_id = str;
  5706. return 0;
  5707. }
/*
 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
 * services.  Advertise one based on the address family of the
 * clientaddr.
 */
static unsigned int
nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
{
	if (strchr(clp->cl_ipaddr, ':') != NULL)
		return scnprintf(buf, len, "tcp6");
	else
		return scnprintf(buf, len, "tcp");
}

static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_setclientid *sc = calldata;

	if (task->tk_status == 0)
		sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
}

static const struct rpc_call_ops nfs4_setclientid_ops = {
	.rpc_call_done = nfs4_setclientid_done,
};

  5730. /**
  5731. * nfs4_proc_setclientid - Negotiate client ID
  5732. * @clp: state data structure
  5733. * @program: RPC program for NFSv4 callback service
  5734. * @port: IP port number for NFS4 callback service
  5735. * @cred: credential to use for this call
  5736. * @res: where to place the result
  5737. *
  5738. * Returns zero, a negative errno, or a negative NFS4ERR status code.
  5739. */
  5740. int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
  5741. unsigned short port, const struct cred *cred,
  5742. struct nfs4_setclientid_res *res)
  5743. {
  5744. nfs4_verifier sc_verifier;
  5745. struct nfs4_setclientid setclientid = {
  5746. .sc_verifier = &sc_verifier,
  5747. .sc_prog = program,
  5748. .sc_clnt = clp,
  5749. };
  5750. struct rpc_message msg = {
  5751. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
  5752. .rpc_argp = &setclientid,
  5753. .rpc_resp = res,
  5754. .rpc_cred = cred,
  5755. };
  5756. struct rpc_task_setup task_setup_data = {
  5757. .rpc_client = clp->cl_rpcclient,
  5758. .rpc_message = &msg,
  5759. .callback_ops = &nfs4_setclientid_ops,
  5760. .callback_data = &setclientid,
  5761. .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
  5762. };
  5763. unsigned long now = jiffies;
  5764. int status;
  5765. /* nfs_client_id4 */
  5766. nfs4_init_boot_verifier(clp, &sc_verifier);
  5767. if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
  5768. status = nfs4_init_uniform_client_string(clp);
  5769. else
  5770. status = nfs4_init_nonuniform_client_string(clp);
  5771. if (status)
  5772. goto out;
  5773. /* cb_client4 */
  5774. setclientid.sc_netid_len =
  5775. nfs4_init_callback_netid(clp,
  5776. setclientid.sc_netid,
  5777. sizeof(setclientid.sc_netid));
  5778. setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
  5779. sizeof(setclientid.sc_uaddr), "%s.%u.%u",
  5780. clp->cl_ipaddr, port >> 8, port & 255);
  5781. dprintk("NFS call setclientid auth=%s, '%s'\n",
  5782. clp->cl_rpcclient->cl_auth->au_ops->au_name,
  5783. clp->cl_owner_id);
  5784. status = nfs4_call_sync_custom(&task_setup_data);
  5785. if (setclientid.sc_cred) {
  5786. kfree(clp->cl_acceptor);
  5787. clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
  5788. put_rpccred(setclientid.sc_cred);
  5789. }
  5790. if (status == 0)
  5791. do_renew_lease(clp, now);
  5792. out:
  5793. trace_nfs4_setclientid(clp, status);
  5794. dprintk("NFS reply setclientid: %d\n", status);
  5795. return status;
  5796. }
  5797. /**
  5798. * nfs4_proc_setclientid_confirm - Confirm client ID
  5799. * @clp: state data structure
  5800. * @arg: result of a previous SETCLIENTID
  5801. * @cred: credential to use for this call
  5802. *
  5803. * Returns zero, a negative errno, or a negative NFS4ERR status code.
  5804. */
  5805. int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
  5806. struct nfs4_setclientid_res *arg,
  5807. const struct cred *cred)
  5808. {
  5809. struct rpc_message msg = {
  5810. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
  5811. .rpc_argp = arg,
  5812. .rpc_cred = cred,
  5813. };
  5814. int status;
  5815. dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
  5816. clp->cl_rpcclient->cl_auth->au_ops->au_name,
  5817. clp->cl_clientid);
  5818. status = rpc_call_sync(clp->cl_rpcclient, &msg,
  5819. RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
  5820. trace_nfs4_setclientid_confirm(clp, status);
  5821. dprintk("NFS reply setclientid_confirm: %d\n", status);
  5822. return status;
  5823. }
  5824. struct nfs4_delegreturndata {
  5825. struct nfs4_delegreturnargs args;
  5826. struct nfs4_delegreturnres res;
  5827. struct nfs_fh fh;
  5828. nfs4_stateid stateid;
  5829. unsigned long timestamp;
  5830. struct {
  5831. struct nfs4_layoutreturn_args arg;
  5832. struct nfs4_layoutreturn_res res;
  5833. struct nfs4_xdr_opaque_data ld_private;
  5834. u32 roc_barrier;
  5835. bool roc;
  5836. } lr;
  5837. struct nfs_fattr fattr;
  5838. int rpc_status;
  5839. struct inode *inode;
  5840. };
  5841. static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
  5842. {
  5843. struct nfs4_delegreturndata *data = calldata;
  5844. struct nfs4_exception exception = {
  5845. .inode = data->inode,
  5846. .stateid = &data->stateid,
  5847. .task_is_privileged = data->args.seq_args.sa_privileged,
  5848. };
  5849. if (!nfs4_sequence_done(task, &data->res.seq_res))
  5850. return;
  5851. trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
  5852. /* Handle Layoutreturn errors */
  5853. if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
  5854. &data->res.lr_ret) == -EAGAIN)
  5855. goto out_restart;
  5856. switch (task->tk_status) {
  5857. case 0:
  5858. renew_lease(data->res.server, data->timestamp);
  5859. break;
  5860. case -NFS4ERR_ADMIN_REVOKED:
  5861. case -NFS4ERR_DELEG_REVOKED:
  5862. case -NFS4ERR_EXPIRED:
  5863. nfs4_free_revoked_stateid(data->res.server,
  5864. data->args.stateid,
  5865. task->tk_msg.rpc_cred);
  5866. fallthrough;
  5867. case -NFS4ERR_BAD_STATEID:
  5868. case -NFS4ERR_STALE_STATEID:
  5869. case -ETIMEDOUT:
  5870. task->tk_status = 0;
  5871. break;
  5872. case -NFS4ERR_OLD_STATEID:
  5873. if (!nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
  5874. nfs4_stateid_seqid_inc(&data->stateid);
  5875. if (data->args.bitmask) {
  5876. data->args.bitmask = NULL;
  5877. data->res.fattr = NULL;
  5878. }
  5879. goto out_restart;
  5880. case -NFS4ERR_ACCESS:
  5881. if (data->args.bitmask) {
  5882. data->args.bitmask = NULL;
  5883. data->res.fattr = NULL;
  5884. goto out_restart;
  5885. }
  5886. fallthrough;
  5887. default:
  5888. task->tk_status = nfs4_async_handle_exception(task,
  5889. data->res.server, task->tk_status,
  5890. &exception);
  5891. if (exception.retry)
  5892. goto out_restart;
  5893. }
  5894. nfs_delegation_mark_returned(data->inode, data->args.stateid);
  5895. data->rpc_status = task->tk_status;
  5896. return;
  5897. out_restart:
  5898. task->tk_status = 0;
  5899. rpc_restart_call_prepare(task);
  5900. }
  5901. static void nfs4_delegreturn_release(void *calldata)
  5902. {
  5903. struct nfs4_delegreturndata *data = calldata;
  5904. struct inode *inode = data->inode;
  5905. if (data->lr.roc)
  5906. pnfs_roc_release(&data->lr.arg, &data->lr.res,
  5907. data->res.lr_ret);
  5908. if (inode) {
  5909. nfs4_fattr_set_prechange(&data->fattr,
  5910. inode_peek_iversion_raw(inode));
  5911. nfs_refresh_inode(inode, &data->fattr);
  5912. nfs_iput_and_deactive(inode);
  5913. }
  5914. kfree(calldata);
  5915. }
  5916. static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
  5917. {
  5918. struct nfs4_delegreturndata *d_data;
  5919. struct pnfs_layout_hdr *lo;
  5920. d_data = data;
  5921. if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
  5922. nfs4_sequence_done(task, &d_data->res.seq_res);
  5923. return;
  5924. }
  5925. lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
  5926. if (lo && !pnfs_layout_is_valid(lo)) {
  5927. d_data->args.lr_args = NULL;
  5928. d_data->res.lr_res = NULL;
  5929. }
  5930. nfs4_setup_sequence(d_data->res.server->nfs_client,
  5931. &d_data->args.seq_args,
  5932. &d_data->res.seq_res,
  5933. task);
  5934. }
  5935. static const struct rpc_call_ops nfs4_delegreturn_ops = {
  5936. .rpc_call_prepare = nfs4_delegreturn_prepare,
  5937. .rpc_call_done = nfs4_delegreturn_done,
  5938. .rpc_release = nfs4_delegreturn_release,
  5939. };
  5940. static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
  5941. {
  5942. struct nfs4_delegreturndata *data;
  5943. struct nfs_server *server = NFS_SERVER(inode);
  5944. struct rpc_task *task;
  5945. struct rpc_message msg = {
  5946. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
  5947. .rpc_cred = cred,
  5948. };
  5949. struct rpc_task_setup task_setup_data = {
  5950. .rpc_client = server->client,
  5951. .rpc_message = &msg,
  5952. .callback_ops = &nfs4_delegreturn_ops,
  5953. .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
  5954. };
  5955. int status = 0;
  5956. if (nfs_server_capable(inode, NFS_CAP_MOVEABLE))
  5957. task_setup_data.flags |= RPC_TASK_MOVEABLE;
  5958. data = kzalloc(sizeof(*data), GFP_KERNEL);
  5959. if (data == NULL)
  5960. return -ENOMEM;
  5961. nfs4_state_protect(server->nfs_client,
  5962. NFS_SP4_MACH_CRED_CLEANUP,
  5963. &task_setup_data.rpc_client, &msg);
  5964. data->args.fhandle = &data->fh;
  5965. data->args.stateid = &data->stateid;
  5966. nfs4_bitmask_set(data->args.bitmask_store,
  5967. server->cache_consistency_bitmask, inode, 0);
  5968. data->args.bitmask = data->args.bitmask_store;
  5969. nfs_copy_fh(&data->fh, NFS_FH(inode));
  5970. nfs4_stateid_copy(&data->stateid, stateid);
  5971. data->res.fattr = &data->fattr;
  5972. data->res.server = server;
  5973. data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
  5974. data->lr.arg.ld_private = &data->lr.ld_private;
  5975. nfs_fattr_init(data->res.fattr);
  5976. data->timestamp = jiffies;
  5977. data->rpc_status = 0;
  5978. data->inode = nfs_igrab_and_active(inode);
  5979. if (data->inode || issync) {
  5980. data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
  5981. cred);
  5982. if (data->lr.roc) {
  5983. data->args.lr_args = &data->lr.arg;
  5984. data->res.lr_res = &data->lr.res;
  5985. }
  5986. }
  5987. if (!data->inode)
  5988. nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
  5989. 1);
  5990. else
  5991. nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1,
  5992. 0);
  5993. task_setup_data.callback_data = data;
  5994. msg.rpc_argp = &data->args;
  5995. msg.rpc_resp = &data->res;
  5996. task = rpc_run_task(&task_setup_data);
  5997. if (IS_ERR(task))
  5998. return PTR_ERR(task);
  5999. if (!issync)
  6000. goto out;
  6001. status = rpc_wait_for_completion_task(task);
  6002. if (status != 0)
  6003. goto out;
  6004. status = data->rpc_status;
  6005. out:
  6006. rpc_put_task(task);
  6007. return status;
  6008. }
  6009. int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync)
  6010. {
  6011. struct nfs_server *server = NFS_SERVER(inode);
  6012. struct nfs4_exception exception = { };
  6013. int err;
  6014. do {
  6015. err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
  6016. trace_nfs4_delegreturn(inode, stateid, err);
  6017. switch (err) {
  6018. case -NFS4ERR_STALE_STATEID:
  6019. case -NFS4ERR_EXPIRED:
  6020. case 0:
  6021. return 0;
  6022. }
  6023. err = nfs4_handle_exception(server, err, &exception);
  6024. } while (exception.retry);
  6025. return err;
  6026. }
  6027. static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
  6028. {
  6029. struct inode *inode = state->inode;
  6030. struct nfs_server *server = NFS_SERVER(inode);
  6031. struct nfs_client *clp = server->nfs_client;
  6032. struct nfs_lockt_args arg = {
  6033. .fh = NFS_FH(inode),
  6034. .fl = request,
  6035. };
  6036. struct nfs_lockt_res res = {
  6037. .denied = request,
  6038. };
  6039. struct rpc_message msg = {
  6040. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
  6041. .rpc_argp = &arg,
  6042. .rpc_resp = &res,
  6043. .rpc_cred = state->owner->so_cred,
  6044. };
  6045. struct nfs4_lock_state *lsp;
  6046. int status;
  6047. arg.lock_owner.clientid = clp->cl_clientid;
  6048. status = nfs4_set_lock_state(state, request);
  6049. if (status != 0)
  6050. goto out;
  6051. lsp = request->fl_u.nfs4_fl.owner;
  6052. arg.lock_owner.id = lsp->ls_seqid.owner_id;
  6053. arg.lock_owner.s_dev = server->s_dev;
  6054. status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
  6055. switch (status) {
  6056. case 0:
  6057. request->fl_type = F_UNLCK;
  6058. break;
  6059. case -NFS4ERR_DENIED:
  6060. status = 0;
  6061. }
  6062. request->fl_ops->fl_release_private(request);
  6063. request->fl_ops = NULL;
  6064. out:
  6065. return status;
  6066. }
  6067. static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
  6068. {
  6069. struct nfs4_exception exception = {
  6070. .interruptible = true,
  6071. };
  6072. int err;
  6073. do {
  6074. err = _nfs4_proc_getlk(state, cmd, request);
  6075. trace_nfs4_get_lock(request, state, cmd, err);
  6076. err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
  6077. &exception);
  6078. } while (exception.retry);
  6079. return err;
  6080. }
/*
 * Update the seqid of a lock stateid after receiving
 * NFS4ERR_OLD_STATEID
 */
static bool nfs4_refresh_lock_old_stateid(nfs4_stateid *dst,
		struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret = false;

	spin_lock(&state->state_lock);
	if (!nfs4_stateid_match_other(dst, &lsp->ls_stateid))
		goto out;
	if (!nfs4_stateid_is_newer(&lsp->ls_stateid, dst))
		nfs4_stateid_seqid_inc(dst);
	else
		dst->seqid = lsp->ls_stateid.seqid;
	ret = true;
out:
	spin_unlock(&state->state_lock);
	return ret;
}

static bool nfs4_sync_lock_stateid(nfs4_stateid *dst,
		struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state = lsp->ls_state;
	bool ret;

	spin_lock(&state->state_lock);
	ret = !nfs4_stateid_match_other(dst, &lsp->ls_stateid);
	nfs4_stateid_copy(dst, &lsp->ls_stateid);
	spin_unlock(&state->state_lock);
	return ret;
}

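/* Calldata for an asynchronous LOCKU (unlock) request. */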
struct nfs4_unlockdata {
	struct nfs_locku_args arg;
	struct nfs_locku_res res;
	struct nfs4_lock_state *lsp;
	struct nfs_open_context *ctx;
	struct nfs_lock_context *l_ctx;
	struct file_lock fl;
	struct nfs_server *server;
	unsigned long timestamp;
};

  6123. static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
  6124. struct nfs_open_context *ctx,
  6125. struct nfs4_lock_state *lsp,
  6126. struct nfs_seqid *seqid)
  6127. {
  6128. struct nfs4_unlockdata *p;
  6129. struct nfs4_state *state = lsp->ls_state;
  6130. struct inode *inode = state->inode;
  6131. p = kzalloc(sizeof(*p), GFP_KERNEL);
  6132. if (p == NULL)
  6133. return NULL;
  6134. p->arg.fh = NFS_FH(inode);
  6135. p->arg.fl = &p->fl;
  6136. p->arg.seqid = seqid;
  6137. p->res.seqid = seqid;
  6138. p->lsp = lsp;
  6139. /* Ensure we don't close file until we're done freeing locks! */
  6140. p->ctx = get_nfs_open_context(ctx);
  6141. p->l_ctx = nfs_get_lock_context(ctx);
  6142. locks_init_lock(&p->fl);
  6143. locks_copy_lock(&p->fl, fl);
  6144. p->server = NFS_SERVER(inode);
  6145. spin_lock(&state->state_lock);
  6146. nfs4_stateid_copy(&p->arg.stateid, &lsp->ls_stateid);
  6147. spin_unlock(&state->state_lock);
  6148. return p;
  6149. }
  6150. static void nfs4_locku_release_calldata(void *data)
  6151. {
  6152. struct nfs4_unlockdata *calldata = data;
  6153. nfs_free_seqid(calldata->arg.seqid);
  6154. nfs4_put_lock_state(calldata->lsp);
  6155. nfs_put_lock_context(calldata->l_ctx);
  6156. put_nfs_open_context(calldata->ctx);
  6157. kfree(calldata);
  6158. }
  6159. static void nfs4_locku_done(struct rpc_task *task, void *data)
  6160. {
  6161. struct nfs4_unlockdata *calldata = data;
  6162. struct nfs4_exception exception = {
  6163. .inode = calldata->lsp->ls_state->inode,
  6164. .stateid = &calldata->arg.stateid,
  6165. };
  6166. if (!nfs4_sequence_done(task, &calldata->res.seq_res))
  6167. return;
  6168. switch (task->tk_status) {
  6169. case 0:
  6170. renew_lease(calldata->server, calldata->timestamp);
  6171. locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
  6172. if (nfs4_update_lock_stateid(calldata->lsp,
  6173. &calldata->res.stateid))
  6174. break;
  6175. fallthrough;
  6176. case -NFS4ERR_ADMIN_REVOKED:
  6177. case -NFS4ERR_EXPIRED:
  6178. nfs4_free_revoked_stateid(calldata->server,
  6179. &calldata->arg.stateid,
  6180. task->tk_msg.rpc_cred);
  6181. fallthrough;
  6182. case -NFS4ERR_BAD_STATEID:
  6183. case -NFS4ERR_STALE_STATEID:
  6184. if (nfs4_sync_lock_stateid(&calldata->arg.stateid,
  6185. calldata->lsp))
  6186. rpc_restart_call_prepare(task);
  6187. break;
  6188. case -NFS4ERR_OLD_STATEID:
  6189. if (nfs4_refresh_lock_old_stateid(&calldata->arg.stateid,
  6190. calldata->lsp))
  6191. rpc_restart_call_prepare(task);
  6192. break;
  6193. default:
  6194. task->tk_status = nfs4_async_handle_exception(task,
  6195. calldata->server, task->tk_status,
  6196. &exception);
  6197. if (exception.retry)
  6198. rpc_restart_call_prepare(task);
  6199. }
  6200. nfs_release_seqid(calldata->arg.seqid);
  6201. }
  6202. static void nfs4_locku_prepare(struct rpc_task *task, void *data)
  6203. {
  6204. struct nfs4_unlockdata *calldata = data;
  6205. if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
  6206. nfs_async_iocounter_wait(task, calldata->l_ctx))
  6207. return;
  6208. if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
  6209. goto out_wait;
  6210. if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
  6211. /* Note: exit _without_ running nfs4_locku_done */
  6212. goto out_no_action;
  6213. }
  6214. calldata->timestamp = jiffies;
  6215. if (nfs4_setup_sequence(calldata->server->nfs_client,
  6216. &calldata->arg.seq_args,
  6217. &calldata->res.seq_res,
  6218. task) != 0)
  6219. nfs_release_seqid(calldata->arg.seqid);
  6220. return;
  6221. out_no_action:
  6222. task->tk_action = NULL;
  6223. out_wait:
  6224. nfs4_sequence_done(task, &calldata->res.seq_res);
  6225. }
  6226. static const struct rpc_call_ops nfs4_locku_ops = {
  6227. .rpc_call_prepare = nfs4_locku_prepare,
  6228. .rpc_call_done = nfs4_locku_done,
  6229. .rpc_release = nfs4_locku_release_calldata,
  6230. };
  6231. static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
  6232. struct nfs_open_context *ctx,
  6233. struct nfs4_lock_state *lsp,
  6234. struct nfs_seqid *seqid)
  6235. {
  6236. struct nfs4_unlockdata *data;
  6237. struct rpc_message msg = {
  6238. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
  6239. .rpc_cred = ctx->cred,
  6240. };
  6241. struct rpc_task_setup task_setup_data = {
  6242. .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
  6243. .rpc_message = &msg,
  6244. .callback_ops = &nfs4_locku_ops,
  6245. .workqueue = nfsiod_workqueue,
  6246. .flags = RPC_TASK_ASYNC,
  6247. };
  6248. if (nfs_server_capable(lsp->ls_state->inode, NFS_CAP_MOVEABLE))
  6249. task_setup_data.flags |= RPC_TASK_MOVEABLE;
  6250. nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
  6251. NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
  6252. /* Ensure this is an unlock - when canceling a lock, the
  6253. * canceled lock is passed in, and it won't be an unlock.
  6254. */
  6255. fl->fl_type = F_UNLCK;
  6256. if (fl->fl_flags & FL_CLOSE)
  6257. set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
  6258. data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
  6259. if (data == NULL) {
  6260. nfs_free_seqid(seqid);
  6261. return ERR_PTR(-ENOMEM);
  6262. }
  6263. nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1, 0);
  6264. msg.rpc_argp = &data->arg;
  6265. msg.rpc_resp = &data->res;
  6266. task_setup_data.callback_data = data;
  6267. return rpc_run_task(&task_setup_data);
  6268. }
  6269. static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
  6270. {
  6271. struct inode *inode = state->inode;
  6272. struct nfs4_state_owner *sp = state->owner;
  6273. struct nfs_inode *nfsi = NFS_I(inode);
  6274. struct nfs_seqid *seqid;
  6275. struct nfs4_lock_state *lsp;
  6276. struct rpc_task *task;
  6277. struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
  6278. int status = 0;
  6279. unsigned char fl_flags = request->fl_flags;
  6280. status = nfs4_set_lock_state(state, request);
  6281. /* Unlock _before_ we do the RPC call */
  6282. request->fl_flags |= FL_EXISTS;
  6283. /* Exclude nfs_delegation_claim_locks() */
  6284. mutex_lock(&sp->so_delegreturn_mutex);
  6285. /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
  6286. down_read(&nfsi->rwsem);
  6287. if (locks_lock_inode_wait(inode, request) == -ENOENT) {
  6288. up_read(&nfsi->rwsem);
  6289. mutex_unlock(&sp->so_delegreturn_mutex);
  6290. goto out;
  6291. }
  6292. up_read(&nfsi->rwsem);
  6293. mutex_unlock(&sp->so_delegreturn_mutex);
  6294. if (status != 0)
  6295. goto out;
  6296. /* Is this a delegated lock? */
  6297. lsp = request->fl_u.nfs4_fl.owner;
  6298. if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
  6299. goto out;
  6300. alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
  6301. seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
  6302. status = -ENOMEM;
  6303. if (IS_ERR(seqid))
  6304. goto out;
  6305. task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
  6306. status = PTR_ERR(task);
  6307. if (IS_ERR(task))
  6308. goto out;
  6309. status = rpc_wait_for_completion_task(task);
  6310. rpc_put_task(task);
  6311. out:
  6312. request->fl_flags = fl_flags;
  6313. trace_nfs4_unlock(request, state, F_SETLK, status);
  6314. return status;
  6315. }
  6316. struct nfs4_lockdata {
  6317. struct nfs_lock_args arg;
  6318. struct nfs_lock_res res;
  6319. struct nfs4_lock_state *lsp;
  6320. struct nfs_open_context *ctx;
  6321. struct file_lock fl;
  6322. unsigned long timestamp;
  6323. int rpc_status;
  6324. int cancelled;
  6325. struct nfs_server *server;
  6326. };
  6327. static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
  6328. struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
  6329. gfp_t gfp_mask)
  6330. {
  6331. struct nfs4_lockdata *p;
  6332. struct inode *inode = lsp->ls_state->inode;
  6333. struct nfs_server *server = NFS_SERVER(inode);
  6334. struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
  6335. p = kzalloc(sizeof(*p), gfp_mask);
  6336. if (p == NULL)
  6337. return NULL;
  6338. p->arg.fh = NFS_FH(inode);
  6339. p->arg.fl = &p->fl;
  6340. p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
  6341. if (IS_ERR(p->arg.open_seqid))
  6342. goto out_free;
  6343. alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
  6344. p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
  6345. if (IS_ERR(p->arg.lock_seqid))
  6346. goto out_free_seqid;
  6347. p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
  6348. p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
  6349. p->arg.lock_owner.s_dev = server->s_dev;
  6350. p->res.lock_seqid = p->arg.lock_seqid;
  6351. p->lsp = lsp;
  6352. p->server = server;
  6353. p->ctx = get_nfs_open_context(ctx);
  6354. locks_init_lock(&p->fl);
  6355. locks_copy_lock(&p->fl, fl);
  6356. return p;
  6357. out_free_seqid:
  6358. nfs_free_seqid(p->arg.open_seqid);
  6359. out_free:
  6360. kfree(p);
  6361. return NULL;
  6362. }
  6363. static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
  6364. {
  6365. struct nfs4_lockdata *data = calldata;
  6366. struct nfs4_state *state = data->lsp->ls_state;
  6367. if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
  6368. goto out_wait;
  6369. /* Do we need to do an open_to_lock_owner? */
  6370. if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
  6371. if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
  6372. goto out_release_lock_seqid;
  6373. }
  6374. nfs4_stateid_copy(&data->arg.open_stateid,
  6375. &state->open_stateid);
  6376. data->arg.new_lock_owner = 1;
  6377. data->res.open_seqid = data->arg.open_seqid;
  6378. } else {
  6379. data->arg.new_lock_owner = 0;
  6380. nfs4_stateid_copy(&data->arg.lock_stateid,
  6381. &data->lsp->ls_stateid);
  6382. }
  6383. if (!nfs4_valid_open_stateid(state)) {
  6384. data->rpc_status = -EBADF;
  6385. task->tk_action = NULL;
  6386. goto out_release_open_seqid;
  6387. }
  6388. data->timestamp = jiffies;
  6389. if (nfs4_setup_sequence(data->server->nfs_client,
  6390. &data->arg.seq_args,
  6391. &data->res.seq_res,
  6392. task) == 0)
  6393. return;
  6394. out_release_open_seqid:
  6395. nfs_release_seqid(data->arg.open_seqid);
  6396. out_release_lock_seqid:
  6397. nfs_release_seqid(data->arg.lock_seqid);
  6398. out_wait:
  6399. nfs4_sequence_done(task, &data->res.seq_res);
  6400. dprintk("%s: ret = %d\n", __func__, data->rpc_status);
  6401. }
  6402. static void nfs4_lock_done(struct rpc_task *task, void *calldata)
  6403. {
  6404. struct nfs4_lockdata *data = calldata;
  6405. struct nfs4_lock_state *lsp = data->lsp;
  6406. if (!nfs4_sequence_done(task, &data->res.seq_res))
  6407. return;
  6408. data->rpc_status = task->tk_status;
  6409. switch (task->tk_status) {
  6410. case 0:
  6411. renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
  6412. data->timestamp);
  6413. if (data->arg.new_lock && !data->cancelled) {
  6414. data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
  6415. if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
  6416. goto out_restart;
  6417. }
  6418. if (data->arg.new_lock_owner != 0) {
  6419. nfs_confirm_seqid(&lsp->ls_seqid, 0);
  6420. nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
  6421. set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
  6422. } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
  6423. goto out_restart;
  6424. break;
  6425. case -NFS4ERR_OLD_STATEID:
  6426. if (data->arg.new_lock_owner != 0 &&
  6427. nfs4_refresh_open_old_stateid(&data->arg.open_stateid,
  6428. lsp->ls_state))
  6429. goto out_restart;
  6430. if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp))
  6431. goto out_restart;
  6432. fallthrough;
  6433. case -NFS4ERR_BAD_STATEID:
  6434. case -NFS4ERR_STALE_STATEID:
  6435. case -NFS4ERR_EXPIRED:
  6436. if (data->arg.new_lock_owner != 0) {
  6437. if (!nfs4_stateid_match(&data->arg.open_stateid,
  6438. &lsp->ls_state->open_stateid))
  6439. goto out_restart;
  6440. } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
  6441. &lsp->ls_stateid))
  6442. goto out_restart;
  6443. }
  6444. out_done:
  6445. dprintk("%s: ret = %d!\n", __func__, data->rpc_status);
  6446. return;
  6447. out_restart:
  6448. if (!data->cancelled)
  6449. rpc_restart_call_prepare(task);
  6450. goto out_done;
  6451. }
  6452. static void nfs4_lock_release(void *calldata)
  6453. {
  6454. struct nfs4_lockdata *data = calldata;
  6455. nfs_free_seqid(data->arg.open_seqid);
  6456. if (data->cancelled && data->rpc_status == 0) {
  6457. struct rpc_task *task;
  6458. task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
  6459. data->arg.lock_seqid);
  6460. if (!IS_ERR(task))
  6461. rpc_put_task_async(task);
  6462. dprintk("%s: cancelling lock!\n", __func__);
  6463. } else
  6464. nfs_free_seqid(data->arg.lock_seqid);
  6465. nfs4_put_lock_state(data->lsp);
  6466. put_nfs_open_context(data->ctx);
  6467. kfree(data);
  6468. }
  6469. static const struct rpc_call_ops nfs4_lock_ops = {
  6470. .rpc_call_prepare = nfs4_lock_prepare,
  6471. .rpc_call_done = nfs4_lock_done,
  6472. .rpc_release = nfs4_lock_release,
  6473. };
  6474. static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
  6475. {
  6476. switch (error) {
  6477. case -NFS4ERR_ADMIN_REVOKED:
  6478. case -NFS4ERR_EXPIRED:
  6479. case -NFS4ERR_BAD_STATEID:
  6480. lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
  6481. if (new_lock_owner != 0 ||
  6482. test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
  6483. nfs4_schedule_stateid_recovery(server, lsp->ls_state);
  6484. break;
  6485. case -NFS4ERR_STALE_STATEID:
  6486. lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
  6487. nfs4_schedule_lease_recovery(server->nfs_client);
  6488. }
  6489. }
  6490. static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
  6491. {
  6492. struct nfs4_lockdata *data;
  6493. struct rpc_task *task;
  6494. struct rpc_message msg = {
  6495. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
  6496. .rpc_cred = state->owner->so_cred,
  6497. };
  6498. struct rpc_task_setup task_setup_data = {
  6499. .rpc_client = NFS_CLIENT(state->inode),
  6500. .rpc_message = &msg,
  6501. .callback_ops = &nfs4_lock_ops,
  6502. .workqueue = nfsiod_workqueue,
  6503. .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF,
  6504. };
  6505. int ret;
  6506. if (nfs_server_capable(state->inode, NFS_CAP_MOVEABLE))
  6507. task_setup_data.flags |= RPC_TASK_MOVEABLE;
  6508. data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
  6509. fl->fl_u.nfs4_fl.owner, GFP_KERNEL);
  6510. if (data == NULL)
  6511. return -ENOMEM;
  6512. if (IS_SETLKW(cmd))
  6513. data->arg.block = 1;
  6514. nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1,
  6515. recovery_type > NFS_LOCK_NEW);
  6516. msg.rpc_argp = &data->arg;
  6517. msg.rpc_resp = &data->res;
  6518. task_setup_data.callback_data = data;
  6519. if (recovery_type > NFS_LOCK_NEW) {
  6520. if (recovery_type == NFS_LOCK_RECLAIM)
  6521. data->arg.reclaim = NFS_LOCK_RECLAIM;
  6522. } else
  6523. data->arg.new_lock = 1;
  6524. task = rpc_run_task(&task_setup_data);
  6525. if (IS_ERR(task))
  6526. return PTR_ERR(task);
  6527. ret = rpc_wait_for_completion_task(task);
  6528. if (ret == 0) {
  6529. ret = data->rpc_status;
  6530. if (ret)
  6531. nfs4_handle_setlk_error(data->server, data->lsp,
  6532. data->arg.new_lock_owner, ret);
  6533. } else
  6534. data->cancelled = true;
  6535. trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
  6536. rpc_put_task(task);
  6537. dprintk("%s: ret = %d\n", __func__, ret);
  6538. return ret;
  6539. }
  6540. static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
  6541. {
  6542. struct nfs_server *server = NFS_SERVER(state->inode);
  6543. struct nfs4_exception exception = {
  6544. .inode = state->inode,
  6545. };
  6546. int err;
  6547. do {
  6548. /* Cache the lock if possible... */
  6549. if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
  6550. return 0;
  6551. err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
  6552. if (err != -NFS4ERR_DELAY)
  6553. break;
  6554. nfs4_handle_exception(server, err, &exception);
  6555. } while (exception.retry);
  6556. return err;
  6557. }
  6558. static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
  6559. {
  6560. struct nfs_server *server = NFS_SERVER(state->inode);
  6561. struct nfs4_exception exception = {
  6562. .inode = state->inode,
  6563. };
  6564. int err;
  6565. err = nfs4_set_lock_state(state, request);
  6566. if (err != 0)
  6567. return err;
  6568. if (!recover_lost_locks) {
  6569. set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
  6570. return 0;
  6571. }
  6572. do {
  6573. if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
  6574. return 0;
  6575. err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
  6576. switch (err) {
  6577. default:
  6578. goto out;
  6579. case -NFS4ERR_GRACE:
  6580. case -NFS4ERR_DELAY:
  6581. nfs4_handle_exception(server, err, &exception);
  6582. err = 0;
  6583. }
  6584. } while (exception.retry);
  6585. out:
  6586. return err;
  6587. }
  6588. #if defined(CONFIG_NFS_V4_1)
  6589. static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
  6590. {
  6591. struct nfs4_lock_state *lsp;
  6592. int status;
  6593. status = nfs4_set_lock_state(state, request);
  6594. if (status != 0)
  6595. return status;
  6596. lsp = request->fl_u.nfs4_fl.owner;
  6597. if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
  6598. test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
  6599. return 0;
  6600. return nfs4_lock_expired(state, request);
  6601. }
  6602. #endif
  6603. static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
  6604. {
  6605. struct nfs_inode *nfsi = NFS_I(state->inode);
  6606. struct nfs4_state_owner *sp = state->owner;
  6607. unsigned char fl_flags = request->fl_flags;
  6608. int status;
  6609. request->fl_flags |= FL_ACCESS;
  6610. status = locks_lock_inode_wait(state->inode, request);
  6611. if (status < 0)
  6612. goto out;
  6613. mutex_lock(&sp->so_delegreturn_mutex);
  6614. down_read(&nfsi->rwsem);
  6615. if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
  6616. /* Yes: cache locks! */
  6617. /* ...but avoid races with delegation recall... */
  6618. request->fl_flags = fl_flags & ~FL_SLEEP;
  6619. status = locks_lock_inode_wait(state->inode, request);
  6620. up_read(&nfsi->rwsem);
  6621. mutex_unlock(&sp->so_delegreturn_mutex);
  6622. goto out;
  6623. }
  6624. up_read(&nfsi->rwsem);
  6625. mutex_unlock(&sp->so_delegreturn_mutex);
  6626. status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
  6627. out:
  6628. request->fl_flags = fl_flags;
  6629. return status;
  6630. }
  6631. static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
  6632. {
  6633. struct nfs4_exception exception = {
  6634. .state = state,
  6635. .inode = state->inode,
  6636. .interruptible = true,
  6637. };
  6638. int err;
  6639. do {
  6640. err = _nfs4_proc_setlk(state, cmd, request);
  6641. if (err == -NFS4ERR_DENIED)
  6642. err = -EAGAIN;
  6643. err = nfs4_handle_exception(NFS_SERVER(state->inode),
  6644. err, &exception);
  6645. } while (exception.retry);
  6646. return err;
  6647. }
  6648. #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
  6649. #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
  6650. static int
  6651. nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
  6652. struct file_lock *request)
  6653. {
  6654. int status = -ERESTARTSYS;
  6655. unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6656. while (!signalled()) {
  6657. status = nfs4_proc_setlk(state, cmd, request);
  6658. if ((status != -EAGAIN) || IS_SETLK(cmd))
  6659. break;
  6660. __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
  6661. schedule_timeout(timeout);
  6662. timeout *= 2;
  6663. timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
  6664. status = -ERESTARTSYS;
  6665. }
  6666. return status;
  6667. }
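nfs4_retry_setlk_simple() above polls a blocking lock request by retrying SETLK and doubling the sleep between attempts, capped at NFS4_LOCK_MAXTIMEOUT. A minimal userspace sketch of the same capped exponential backoff (try_once() and the failure count are hypothetical, not part of this file):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for one SETLK attempt: fails twice, then succeeds. */
static bool try_once(void)
{
        static int calls;
        return ++calls > 2;
}

int main(void)
{
        unsigned int delay = 1;                 /* seconds; analogous to NFS4_LOCK_MINTIMEOUT */

        while (!try_once()) {
                sleep(delay);
                delay *= 2;                     /* double the wait between attempts ... */
                if (delay > 30)                 /* ... but cap it, like NFS4_LOCK_MAXTIMEOUT */
                        delay = 30;
        }
        printf("lock acquired\n");
        return 0;
}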
  6668. #ifdef CONFIG_NFS_V4_1
  6669. struct nfs4_lock_waiter {
  6670. struct inode *inode;
  6671. struct nfs_lowner owner;
  6672. wait_queue_entry_t wait;
  6673. };
  6674. static int
  6675. nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
  6676. {
  6677. struct nfs4_lock_waiter *waiter =
  6678. container_of(wait, struct nfs4_lock_waiter, wait);
  6679. /* NULL key means to wake up everyone */
  6680. if (key) {
  6681. struct cb_notify_lock_args *cbnl = key;
  6682. struct nfs_lowner *lowner = &cbnl->cbnl_owner,
  6683. *wowner = &waiter->owner;
  6684. /* Only wake if the callback was for the same owner. */
  6685. if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
  6686. return 0;
  6687. /* Make sure it's for the right inode */
  6688. if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
  6689. return 0;
  6690. }
  6691. return woken_wake_function(wait, mode, flags, key);
  6692. }
  6693. static int
  6694. nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
  6695. {
  6696. struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
  6697. struct nfs_server *server = NFS_SERVER(state->inode);
  6698. struct nfs_client *clp = server->nfs_client;
  6699. wait_queue_head_t *q = &clp->cl_lock_waitq;
  6700. struct nfs4_lock_waiter waiter = {
  6701. .inode = state->inode,
  6702. .owner = { .clientid = clp->cl_clientid,
  6703. .id = lsp->ls_seqid.owner_id,
  6704. .s_dev = server->s_dev },
  6705. };
  6706. int status;
  6707. /* Don't bother with waitqueue if we don't expect a callback */
  6708. if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
  6709. return nfs4_retry_setlk_simple(state, cmd, request);
  6710. init_wait(&waiter.wait);
  6711. waiter.wait.func = nfs4_wake_lock_waiter;
  6712. add_wait_queue(q, &waiter.wait);
  6713. do {
  6714. status = nfs4_proc_setlk(state, cmd, request);
  6715. if (status != -EAGAIN || IS_SETLK(cmd))
  6716. break;
  6717. status = -ERESTARTSYS;
  6718. wait_woken(&waiter.wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE,
  6719. NFS4_LOCK_MAXTIMEOUT);
  6720. } while (!signalled());
  6721. remove_wait_queue(q, &waiter.wait);
  6722. return status;
  6723. }
  6724. #else /* !CONFIG_NFS_V4_1 */
  6725. static inline int
  6726. nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
  6727. {
  6728. return nfs4_retry_setlk_simple(state, cmd, request);
  6729. }
  6730. #endif
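On NFSv4.1 the retry path above parks the caller on clp->cl_lock_waitq and relies on a CB_NOTIFY_LOCK callback (or the 30-second timeout) to wake it, rather than polling blindly. A simplified kernel-style sketch of that wait_woken() pattern, offered only as an illustration; my_waitq and wake_cond() are hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

/* Sleep until wake_up(&my_waitq), a signal, or the timeout; re-check the condition on each wake-up. */
static int wait_for_cond(bool (*wake_cond)(void), long timeout)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int ret = 0;

        add_wait_queue(&my_waitq, &wait);
        while (!wake_cond()) {
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
        }
        remove_wait_queue(&my_waitq, &wait);
        return ret;
}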
  6731. static int
  6732. nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
  6733. {
  6734. struct nfs_open_context *ctx;
  6735. struct nfs4_state *state;
  6736. int status;
  6737. /* verify open state */
  6738. ctx = nfs_file_open_context(filp);
  6739. state = ctx->state;
  6740. if (IS_GETLK(cmd)) {
  6741. if (state != NULL)
  6742. return nfs4_proc_getlk(state, F_GETLK, request);
  6743. return 0;
  6744. }
  6745. if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
  6746. return -EINVAL;
  6747. if (request->fl_type == F_UNLCK) {
  6748. if (state != NULL)
  6749. return nfs4_proc_unlck(state, cmd, request);
  6750. return 0;
  6751. }
  6752. if (state == NULL)
  6753. return -ENOLCK;
  6754. if ((request->fl_flags & FL_POSIX) &&
  6755. !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
  6756. return -ENOLCK;
  6757. /*
  6758. * Don't rely on the VFS having checked the file open mode,
  6759. * since it won't do this for flock() locks.
  6760. */
  6761. switch (request->fl_type) {
  6762. case F_RDLCK:
  6763. if (!(filp->f_mode & FMODE_READ))
  6764. return -EBADF;
  6765. break;
  6766. case F_WRLCK:
  6767. if (!(filp->f_mode & FMODE_WRITE))
  6768. return -EBADF;
  6769. }
  6770. status = nfs4_set_lock_state(state, request);
  6771. if (status != 0)
  6772. return status;
  6773. return nfs4_retry_setlk(state, cmd, request);
  6774. }
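For context, nfs4_proc_lock() above is what an ordinary POSIX byte-range lock from userspace reaches on an NFSv4 mount. A small illustration with a hypothetical path; note that requesting F_WRLCK needs the file open for writing, matching the FMODE_WRITE check above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct flock fl = {
                .l_type   = F_WRLCK,
                .l_whence = SEEK_SET,
                .l_start  = 0,
                .l_len    = 0,                  /* 0 means "to end of file" */
        };
        int fd = open("/mnt/nfs/demo.txt", O_RDWR);

        if (fd < 0 || fcntl(fd, F_SETLKW, &fl) < 0) {
                perror("lock");
                return 1;
        }
        /* ... critical section ... */
        fl.l_type = F_UNLCK;
        fcntl(fd, F_SETLK, &fl);
        close(fd);
        return 0;
}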
  6775. static int nfs4_delete_lease(struct file *file, void **priv)
  6776. {
  6777. return generic_setlease(file, F_UNLCK, NULL, priv);
  6778. }
  6779. static int nfs4_add_lease(struct file *file, long arg, struct file_lock **lease,
  6780. void **priv)
  6781. {
  6782. struct inode *inode = file_inode(file);
  6783. fmode_t type = arg == F_RDLCK ? FMODE_READ : FMODE_WRITE;
  6784. int ret;
  6785. /* No delegation, no lease */
  6786. if (!nfs4_have_delegation(inode, type))
  6787. return -EAGAIN;
  6788. ret = generic_setlease(file, arg, lease, priv);
  6789. if (ret || nfs4_have_delegation(inode, type))
  6790. return ret;
  6791. /* We raced with a delegation return */
  6792. nfs4_delete_lease(file, priv);
  6793. return -EAGAIN;
  6794. }
  6795. int nfs4_proc_setlease(struct file *file, long arg, struct file_lock **lease,
  6796. void **priv)
  6797. {
  6798. switch (arg) {
  6799. case F_RDLCK:
  6800. case F_WRLCK:
  6801. return nfs4_add_lease(file, arg, lease, priv);
  6802. case F_UNLCK:
  6803. return nfs4_delete_lease(file, priv);
  6804. default:
  6805. return -EINVAL;
  6806. }
  6807. }
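nfs4_add_lease() above only grants a lease while the client already holds a matching delegation. A userspace illustration with a hypothetical path; F_SETLEASE is a Linux-specific fcntl and fails with EAGAIN here when no delegation is held:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/nfs/demo.txt", O_RDONLY);

        if (fd < 0)
                return 1;
        if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
                perror("F_SETLEASE");           /* EAGAIN if no read delegation is held */
        else
                fcntl(fd, F_SETLEASE, F_UNLCK);
        close(fd);
        return 0;
}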
  6808. int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
  6809. {
  6810. struct nfs_server *server = NFS_SERVER(state->inode);
  6811. int err;
  6812. err = nfs4_set_lock_state(state, fl);
  6813. if (err != 0)
  6814. return err;
  6815. do {
  6816. err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
  6817. if (err != -NFS4ERR_DELAY)
  6818. break;
  6819. ssleep(1);
  6820. } while (err == -NFS4ERR_DELAY);
  6821. return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
  6822. }
  6823. struct nfs_release_lockowner_data {
  6824. struct nfs4_lock_state *lsp;
  6825. struct nfs_server *server;
  6826. struct nfs_release_lockowner_args args;
  6827. struct nfs_release_lockowner_res res;
  6828. unsigned long timestamp;
  6829. };
  6830. static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
  6831. {
  6832. struct nfs_release_lockowner_data *data = calldata;
  6833. struct nfs_server *server = data->server;
  6834. nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
  6835. &data->res.seq_res, task);
  6836. data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
  6837. data->timestamp = jiffies;
  6838. }
  6839. static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
  6840. {
  6841. struct nfs_release_lockowner_data *data = calldata;
  6842. struct nfs_server *server = data->server;
  6843. nfs40_sequence_done(task, &data->res.seq_res);
  6844. switch (task->tk_status) {
  6845. case 0:
  6846. renew_lease(server, data->timestamp);
  6847. break;
  6848. case -NFS4ERR_STALE_CLIENTID:
  6849. case -NFS4ERR_EXPIRED:
  6850. nfs4_schedule_lease_recovery(server->nfs_client);
  6851. break;
  6852. case -NFS4ERR_LEASE_MOVED:
  6853. case -NFS4ERR_DELAY:
  6854. if (nfs4_async_handle_error(task, server,
  6855. NULL, NULL) == -EAGAIN)
  6856. rpc_restart_call_prepare(task);
  6857. }
  6858. }
  6859. static void nfs4_release_lockowner_release(void *calldata)
  6860. {
  6861. struct nfs_release_lockowner_data *data = calldata;
  6862. nfs4_free_lock_state(data->server, data->lsp);
  6863. kfree(calldata);
  6864. }
  6865. static const struct rpc_call_ops nfs4_release_lockowner_ops = {
  6866. .rpc_call_prepare = nfs4_release_lockowner_prepare,
  6867. .rpc_call_done = nfs4_release_lockowner_done,
  6868. .rpc_release = nfs4_release_lockowner_release,
  6869. };
  6870. static void
  6871. nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
  6872. {
  6873. struct nfs_release_lockowner_data *data;
  6874. struct rpc_message msg = {
  6875. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
  6876. };
  6877. if (server->nfs_client->cl_mvops->minor_version != 0)
  6878. return;
  6879. data = kmalloc(sizeof(*data), GFP_KERNEL);
  6880. if (!data)
  6881. return;
  6882. data->lsp = lsp;
  6883. data->server = server;
  6884. data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
  6885. data->args.lock_owner.id = lsp->ls_seqid.owner_id;
  6886. data->args.lock_owner.s_dev = server->s_dev;
  6887. msg.rpc_argp = &data->args;
  6888. msg.rpc_resp = &data->res;
  6889. nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
  6890. rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
  6891. }
  6892. #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
  6893. static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
  6894. struct user_namespace *mnt_userns,
  6895. struct dentry *unused, struct inode *inode,
  6896. const char *key, const void *buf,
  6897. size_t buflen, int flags)
  6898. {
  6899. return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_ACL);
  6900. }
  6901. static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
  6902. struct dentry *unused, struct inode *inode,
  6903. const char *key, void *buf, size_t buflen)
  6904. {
  6905. return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_ACL);
  6906. }
  6907. static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
  6908. {
  6909. return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_ACL);
  6910. }
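The "system.nfs4_acl" handlers above expose the server's NFSv4 ACL as a raw xattr. A userspace illustration with a hypothetical path; the returned buffer is XDR-encoded, which tools such as nfs4_getfacl decode into a readable ACL:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
        char buf[4096];
        ssize_t len = getxattr("/mnt/nfs/demo.txt", "system.nfs4_acl",
                               buf, sizeof(buf));

        if (len < 0) {
                perror("getxattr");
                return 1;
        }
        printf("nfs4_acl is %zd bytes of XDR data\n", len);
        return 0;
}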
  6911. #if defined(CONFIG_NFS_V4_1)
  6912. #define XATTR_NAME_NFSV4_DACL "system.nfs4_dacl"
  6913. static int nfs4_xattr_set_nfs4_dacl(const struct xattr_handler *handler,
  6914. struct user_namespace *mnt_userns,
  6915. struct dentry *unused, struct inode *inode,
  6916. const char *key, const void *buf,
  6917. size_t buflen, int flags)
  6918. {
  6919. return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_DACL);
  6920. }
  6921. static int nfs4_xattr_get_nfs4_dacl(const struct xattr_handler *handler,
  6922. struct dentry *unused, struct inode *inode,
  6923. const char *key, void *buf, size_t buflen)
  6924. {
  6925. return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_DACL);
  6926. }
  6927. static bool nfs4_xattr_list_nfs4_dacl(struct dentry *dentry)
  6928. {
  6929. return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_DACL);
  6930. }
  6931. #define XATTR_NAME_NFSV4_SACL "system.nfs4_sacl"
  6932. static int nfs4_xattr_set_nfs4_sacl(const struct xattr_handler *handler,
  6933. struct user_namespace *mnt_userns,
  6934. struct dentry *unused, struct inode *inode,
  6935. const char *key, const void *buf,
  6936. size_t buflen, int flags)
  6937. {
  6938. return nfs4_proc_set_acl(inode, buf, buflen, NFS4ACL_SACL);
  6939. }
  6940. static int nfs4_xattr_get_nfs4_sacl(const struct xattr_handler *handler,
  6941. struct dentry *unused, struct inode *inode,
  6942. const char *key, void *buf, size_t buflen)
  6943. {
  6944. return nfs4_proc_get_acl(inode, buf, buflen, NFS4ACL_SACL);
  6945. }
  6946. static bool nfs4_xattr_list_nfs4_sacl(struct dentry *dentry)
  6947. {
  6948. return nfs4_server_supports_acls(NFS_SB(dentry->d_sb), NFS4ACL_SACL);
  6949. }
  6950. #endif
  6951. #ifdef CONFIG_NFS_V4_SECURITY_LABEL
  6952. static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
  6953. struct user_namespace *mnt_userns,
  6954. struct dentry *unused, struct inode *inode,
  6955. const char *key, const void *buf,
  6956. size_t buflen, int flags)
  6957. {
  6958. if (security_ismaclabel(key))
  6959. return nfs4_set_security_label(inode, buf, buflen);
  6960. return -EOPNOTSUPP;
  6961. }
  6962. static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
  6963. struct dentry *unused, struct inode *inode,
  6964. const char *key, void *buf, size_t buflen)
  6965. {
  6966. if (security_ismaclabel(key))
  6967. return nfs4_get_security_label(inode, buf, buflen);
  6968. return -EOPNOTSUPP;
  6969. }
  6970. static ssize_t
  6971. nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
  6972. {
  6973. int len = 0;
  6974. if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
  6975. len = security_inode_listsecurity(inode, list, list_len);
  6976. if (len >= 0 && list_len && len > list_len)
  6977. return -ERANGE;
  6978. }
  6979. return len;
  6980. }
  6981. static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
  6982. .prefix = XATTR_SECURITY_PREFIX,
  6983. .get = nfs4_xattr_get_nfs4_label,
  6984. .set = nfs4_xattr_set_nfs4_label,
  6985. };
  6986. #else
  6987. static ssize_t
  6988. nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
  6989. {
  6990. return 0;
  6991. }
  6992. #endif
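With CONFIG_NFS_V4_SECURITY_LABEL, the security.* handler above carries a MAC label (for example SELinux) over the wire on a labeled-NFS mount. A userspace illustration with a hypothetical path:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
        char label[256];
        ssize_t len = getxattr("/mnt/nfs/demo.txt", "security.selinux",
                               label, sizeof(label) - 1);

        if (len < 0) {
                perror("getxattr");
                return 1;
        }
        label[len] = '\0';
        printf("label: %s\n", label);
        return 0;
}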
  6993. #ifdef CONFIG_NFS_V4_2
  6994. static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
  6995. struct user_namespace *mnt_userns,
  6996. struct dentry *unused, struct inode *inode,
  6997. const char *key, const void *buf,
  6998. size_t buflen, int flags)
  6999. {
  7000. u32 mask;
  7001. int ret;
  7002. if (!nfs_server_capable(inode, NFS_CAP_XATTR))
  7003. return -EOPNOTSUPP;
  7004. /*
  7005. * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA*
7006. * flags right now. Handling of xattr operations uses the normal
  7007. * file read/write permissions.
  7008. *
  7009. * Just in case the server has other ideas (which RFC 8276 allows),
  7010. * do a cached access check for the XA* flags to possibly avoid
  7011. * doing an RPC and getting EACCES back.
  7012. */
  7013. if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
  7014. if (!(mask & NFS_ACCESS_XAWRITE))
  7015. return -EACCES;
  7016. }
  7017. if (buf == NULL) {
  7018. ret = nfs42_proc_removexattr(inode, key);
  7019. if (!ret)
  7020. nfs4_xattr_cache_remove(inode, key);
  7021. } else {
  7022. ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags);
  7023. if (!ret)
  7024. nfs4_xattr_cache_add(inode, key, buf, NULL, buflen);
  7025. }
  7026. return ret;
  7027. }
  7028. static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
  7029. struct dentry *unused, struct inode *inode,
  7030. const char *key, void *buf, size_t buflen)
  7031. {
  7032. u32 mask;
  7033. ssize_t ret;
  7034. if (!nfs_server_capable(inode, NFS_CAP_XATTR))
  7035. return -EOPNOTSUPP;
  7036. if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
  7037. if (!(mask & NFS_ACCESS_XAREAD))
  7038. return -EACCES;
  7039. }
  7040. ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
  7041. if (ret)
  7042. return ret;
  7043. ret = nfs4_xattr_cache_get(inode, key, buf, buflen);
  7044. if (ret >= 0 || (ret < 0 && ret != -ENOENT))
  7045. return ret;
  7046. ret = nfs42_proc_getxattr(inode, key, buf, buflen);
  7047. return ret;
  7048. }
  7049. static ssize_t
  7050. nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
  7051. {
  7052. u64 cookie;
  7053. bool eof;
  7054. ssize_t ret, size;
  7055. char *buf;
  7056. size_t buflen;
  7057. u32 mask;
  7058. if (!nfs_server_capable(inode, NFS_CAP_XATTR))
  7059. return 0;
  7060. if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
  7061. if (!(mask & NFS_ACCESS_XALIST))
  7062. return 0;
  7063. }
  7064. ret = nfs_revalidate_inode(inode, NFS_INO_INVALID_CHANGE);
  7065. if (ret)
  7066. return ret;
  7067. ret = nfs4_xattr_cache_list(inode, list, list_len);
  7068. if (ret >= 0 || (ret < 0 && ret != -ENOENT))
  7069. return ret;
  7070. cookie = 0;
  7071. eof = false;
  7072. buflen = list_len ? list_len : XATTR_LIST_MAX;
  7073. buf = list_len ? list : NULL;
  7074. size = 0;
  7075. while (!eof) {
  7076. ret = nfs42_proc_listxattrs(inode, buf, buflen,
  7077. &cookie, &eof);
  7078. if (ret < 0)
  7079. return ret;
  7080. if (list_len) {
  7081. buf += ret;
  7082. buflen -= ret;
  7083. }
  7084. size += ret;
  7085. }
  7086. if (list_len)
  7087. nfs4_xattr_cache_set_list(inode, list, size);
  7088. return size;
  7089. }
  7090. #else
  7091. static ssize_t
  7092. nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
  7093. {
  7094. return 0;
  7095. }
  7096. #endif /* CONFIG_NFS_V4_2 */
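The nfs4_xattr_*_nfs4_user handlers above implement RFC 8276 user xattrs, including the cached ACCESS check and the client-side xattr cache. A userspace round-trip illustration (path and attribute name are hypothetical); it fails with EOPNOTSUPP when the server does not advertise xattr support:

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
        const char *path = "/mnt/nfs/demo.txt";
        char val[64];
        ssize_t len;

        if (setxattr(path, "user.comment", "hello", 5, 0) < 0) {
                perror("setxattr");
                return 1;
        }
        len = getxattr(path, "user.comment", val, sizeof(val));
        if (len >= 0)
                printf("user.comment = %.*s\n", (int)len, val);
        return 0;
}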
  7097. /*
  7098. * nfs_fhget will use either the mounted_on_fileid or the fileid
  7099. */
  7100. static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
  7101. {
  7102. if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
  7103. (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
  7104. (fattr->valid & NFS_ATTR_FATTR_FSID) &&
  7105. (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
  7106. return;
  7107. fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
  7108. NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
  7109. fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
  7110. fattr->nlink = 2;
  7111. }
  7112. static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
  7113. const struct qstr *name,
  7114. struct nfs4_fs_locations *fs_locations,
  7115. struct page *page)
  7116. {
  7117. struct nfs_server *server = NFS_SERVER(dir);
  7118. u32 bitmask[3];
  7119. struct nfs4_fs_locations_arg args = {
  7120. .dir_fh = NFS_FH(dir),
  7121. .name = name,
  7122. .page = page,
  7123. .bitmask = bitmask,
  7124. };
  7125. struct nfs4_fs_locations_res res = {
  7126. .fs_locations = fs_locations,
  7127. };
  7128. struct rpc_message msg = {
  7129. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
  7130. .rpc_argp = &args,
  7131. .rpc_resp = &res,
  7132. };
  7133. int status;
  7134. dprintk("%s: start\n", __func__);
  7135. bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
  7136. bitmask[1] = nfs4_fattr_bitmap[1];
  7137. /* Ask for the fileid of the absent filesystem if mounted_on_fileid
  7138. * is not supported */
  7139. if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
  7140. bitmask[0] &= ~FATTR4_WORD0_FILEID;
  7141. else
  7142. bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
  7143. nfs_fattr_init(fs_locations->fattr);
  7144. fs_locations->server = server;
  7145. fs_locations->nlocations = 0;
  7146. status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
  7147. dprintk("%s: returned status = %d\n", __func__, status);
  7148. return status;
  7149. }
  7150. int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
  7151. const struct qstr *name,
  7152. struct nfs4_fs_locations *fs_locations,
  7153. struct page *page)
  7154. {
  7155. struct nfs4_exception exception = {
  7156. .interruptible = true,
  7157. };
  7158. int err;
  7159. do {
  7160. err = _nfs4_proc_fs_locations(client, dir, name,
  7161. fs_locations, page);
  7162. trace_nfs4_get_fs_locations(dir, name, err);
  7163. err = nfs4_handle_exception(NFS_SERVER(dir), err,
  7164. &exception);
  7165. } while (exception.retry);
  7166. return err;
  7167. }
  7168. /*
  7169. * This operation also signals the server that this client is
  7170. * performing migration recovery. The server can stop returning
  7171. * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
  7172. * appended to this compound to identify the client ID which is
  7173. * performing recovery.
  7174. */
  7175. static int _nfs40_proc_get_locations(struct nfs_server *server,
  7176. struct nfs_fh *fhandle,
  7177. struct nfs4_fs_locations *locations,
  7178. struct page *page, const struct cred *cred)
  7179. {
  7180. struct rpc_clnt *clnt = server->client;
  7181. u32 bitmask[2] = {
  7182. [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
  7183. };
  7184. struct nfs4_fs_locations_arg args = {
  7185. .clientid = server->nfs_client->cl_clientid,
  7186. .fh = fhandle,
  7187. .page = page,
  7188. .bitmask = bitmask,
  7189. .migration = 1, /* skip LOOKUP */
  7190. .renew = 1, /* append RENEW */
  7191. };
  7192. struct nfs4_fs_locations_res res = {
  7193. .fs_locations = locations,
  7194. .migration = 1,
  7195. .renew = 1,
  7196. };
  7197. struct rpc_message msg = {
  7198. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
  7199. .rpc_argp = &args,
  7200. .rpc_resp = &res,
  7201. .rpc_cred = cred,
  7202. };
  7203. unsigned long now = jiffies;
  7204. int status;
  7205. nfs_fattr_init(locations->fattr);
  7206. locations->server = server;
  7207. locations->nlocations = 0;
  7208. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
  7209. status = nfs4_call_sync_sequence(clnt, server, &msg,
  7210. &args.seq_args, &res.seq_res);
  7211. if (status)
  7212. return status;
  7213. renew_lease(server, now);
  7214. return 0;
  7215. }
  7216. #ifdef CONFIG_NFS_V4_1
  7217. /*
  7218. * This operation also signals the server that this client is
  7219. * performing migration recovery. The server can stop asserting
  7220. * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
  7221. * performing this operation is identified in the SEQUENCE
  7222. * operation in this compound.
  7223. *
  7224. * When the client supports GETATTR(fs_locations_info), it can
  7225. * be plumbed in here.
  7226. */
  7227. static int _nfs41_proc_get_locations(struct nfs_server *server,
  7228. struct nfs_fh *fhandle,
  7229. struct nfs4_fs_locations *locations,
  7230. struct page *page, const struct cred *cred)
  7231. {
  7232. struct rpc_clnt *clnt = server->client;
  7233. u32 bitmask[2] = {
  7234. [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
  7235. };
  7236. struct nfs4_fs_locations_arg args = {
  7237. .fh = fhandle,
  7238. .page = page,
  7239. .bitmask = bitmask,
  7240. .migration = 1, /* skip LOOKUP */
  7241. };
  7242. struct nfs4_fs_locations_res res = {
  7243. .fs_locations = locations,
  7244. .migration = 1,
  7245. };
  7246. struct rpc_message msg = {
  7247. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
  7248. .rpc_argp = &args,
  7249. .rpc_resp = &res,
  7250. .rpc_cred = cred,
  7251. };
  7252. struct nfs4_call_sync_data data = {
  7253. .seq_server = server,
  7254. .seq_args = &args.seq_args,
  7255. .seq_res = &res.seq_res,
  7256. };
  7257. struct rpc_task_setup task_setup_data = {
  7258. .rpc_client = clnt,
  7259. .rpc_message = &msg,
  7260. .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
  7261. .callback_data = &data,
  7262. .flags = RPC_TASK_NO_ROUND_ROBIN,
  7263. };
  7264. int status;
  7265. nfs_fattr_init(locations->fattr);
  7266. locations->server = server;
  7267. locations->nlocations = 0;
  7268. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
  7269. status = nfs4_call_sync_custom(&task_setup_data);
  7270. if (status == NFS4_OK &&
  7271. res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
  7272. status = -NFS4ERR_LEASE_MOVED;
  7273. return status;
  7274. }
  7275. #endif /* CONFIG_NFS_V4_1 */
  7276. /**
  7277. * nfs4_proc_get_locations - discover locations for a migrated FSID
  7278. * @server: pointer to nfs_server to process
  7279. * @fhandle: pointer to the kernel NFS client file handle
  7280. * @locations: result of query
  7281. * @page: buffer
  7282. * @cred: credential to use for this operation
  7283. *
  7284. * Returns NFS4_OK on success, a negative NFS4ERR status code if the
  7285. * operation failed, or a negative errno if a local error occurred.
  7286. *
  7287. * On success, "locations" is filled in, but if the server has
  7288. * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
  7289. * asserted.
  7290. *
  7291. * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
  7292. * from this client that require migration recovery.
  7293. */
  7294. int nfs4_proc_get_locations(struct nfs_server *server,
  7295. struct nfs_fh *fhandle,
  7296. struct nfs4_fs_locations *locations,
  7297. struct page *page, const struct cred *cred)
  7298. {
  7299. struct nfs_client *clp = server->nfs_client;
  7300. const struct nfs4_mig_recovery_ops *ops =
  7301. clp->cl_mvops->mig_recovery_ops;
  7302. struct nfs4_exception exception = {
  7303. .interruptible = true,
  7304. };
  7305. int status;
  7306. dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
  7307. (unsigned long long)server->fsid.major,
  7308. (unsigned long long)server->fsid.minor,
  7309. clp->cl_hostname);
  7310. nfs_display_fhandle(fhandle, __func__);
  7311. do {
  7312. status = ops->get_locations(server, fhandle, locations, page,
  7313. cred);
  7314. if (status != -NFS4ERR_DELAY)
  7315. break;
  7316. nfs4_handle_exception(server, status, &exception);
  7317. } while (exception.retry);
  7318. return status;
  7319. }
  7320. /*
  7321. * This operation also signals the server that this client is
  7322. * performing "lease moved" recovery. The server can stop
  7323. * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
  7324. * is appended to this compound to identify the client ID which is
  7325. * performing recovery.
  7326. */
  7327. static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
  7328. {
  7329. struct nfs_server *server = NFS_SERVER(inode);
  7330. struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
  7331. struct rpc_clnt *clnt = server->client;
  7332. struct nfs4_fsid_present_arg args = {
  7333. .fh = NFS_FH(inode),
  7334. .clientid = clp->cl_clientid,
  7335. .renew = 1, /* append RENEW */
  7336. };
  7337. struct nfs4_fsid_present_res res = {
  7338. .renew = 1,
  7339. };
  7340. struct rpc_message msg = {
  7341. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
  7342. .rpc_argp = &args,
  7343. .rpc_resp = &res,
  7344. .rpc_cred = cred,
  7345. };
  7346. unsigned long now = jiffies;
  7347. int status;
  7348. res.fh = nfs_alloc_fhandle();
  7349. if (res.fh == NULL)
  7350. return -ENOMEM;
  7351. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
  7352. status = nfs4_call_sync_sequence(clnt, server, &msg,
  7353. &args.seq_args, &res.seq_res);
  7354. nfs_free_fhandle(res.fh);
  7355. if (status)
  7356. return status;
  7357. do_renew_lease(clp, now);
  7358. return 0;
  7359. }
  7360. #ifdef CONFIG_NFS_V4_1
  7361. /*
  7362. * This operation also signals the server that this client is
  7363. * performing "lease moved" recovery. The server can stop asserting
  7364. * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
  7365. * this operation is identified in the SEQUENCE operation in this
  7366. * compound.
  7367. */
  7368. static int _nfs41_proc_fsid_present(struct inode *inode, const struct cred *cred)
  7369. {
  7370. struct nfs_server *server = NFS_SERVER(inode);
  7371. struct rpc_clnt *clnt = server->client;
  7372. struct nfs4_fsid_present_arg args = {
  7373. .fh = NFS_FH(inode),
  7374. };
  7375. struct nfs4_fsid_present_res res = {
  7376. };
  7377. struct rpc_message msg = {
  7378. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
  7379. .rpc_argp = &args,
  7380. .rpc_resp = &res,
  7381. .rpc_cred = cred,
  7382. };
  7383. int status;
  7384. res.fh = nfs_alloc_fhandle();
  7385. if (res.fh == NULL)
  7386. return -ENOMEM;
  7387. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
  7388. status = nfs4_call_sync_sequence(clnt, server, &msg,
  7389. &args.seq_args, &res.seq_res);
  7390. nfs_free_fhandle(res.fh);
  7391. if (status == NFS4_OK &&
  7392. res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
  7393. status = -NFS4ERR_LEASE_MOVED;
  7394. return status;
  7395. }
  7396. #endif /* CONFIG_NFS_V4_1 */
  7397. /**
  7398. * nfs4_proc_fsid_present - Is this FSID present or absent on server?
  7399. * @inode: inode on FSID to check
  7400. * @cred: credential to use for this operation
  7401. *
  7402. * Server indicates whether the FSID is present, moved, or not
  7403. * recognized. This operation is necessary to clear a LEASE_MOVED
  7404. * condition for this client ID.
  7405. *
  7406. * Returns NFS4_OK if the FSID is present on this server,
  7407. * -NFS4ERR_MOVED if the FSID is no longer present, a negative
  7408. * NFS4ERR code if some error occurred on the server, or a
  7409. * negative errno if a local failure occurred.
  7410. */
  7411. int nfs4_proc_fsid_present(struct inode *inode, const struct cred *cred)
  7412. {
  7413. struct nfs_server *server = NFS_SERVER(inode);
  7414. struct nfs_client *clp = server->nfs_client;
  7415. const struct nfs4_mig_recovery_ops *ops =
  7416. clp->cl_mvops->mig_recovery_ops;
  7417. struct nfs4_exception exception = {
  7418. .interruptible = true,
  7419. };
  7420. int status;
  7421. dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
  7422. (unsigned long long)server->fsid.major,
  7423. (unsigned long long)server->fsid.minor,
  7424. clp->cl_hostname);
  7425. nfs_display_fhandle(NFS_FH(inode), __func__);
  7426. do {
  7427. status = ops->fsid_present(inode, cred);
  7428. if (status != -NFS4ERR_DELAY)
  7429. break;
  7430. nfs4_handle_exception(server, status, &exception);
  7431. } while (exception.retry);
  7432. return status;
  7433. }
  7434. /*
7435. * If 'use_integrity' is true and the state management nfs_client
  7436. * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
  7437. * and the machine credential as per RFC3530bis and RFC5661 Security
  7438. * Considerations sections. Otherwise, just use the user cred with the
  7439. * filesystem's rpc_client.
  7440. */
  7441. static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
  7442. {
  7443. int status;
  7444. struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
  7445. struct nfs_client *clp = NFS_SERVER(dir)->nfs_client;
  7446. struct nfs4_secinfo_arg args = {
  7447. .dir_fh = NFS_FH(dir),
  7448. .name = name,
  7449. };
  7450. struct nfs4_secinfo_res res = {
  7451. .flavors = flavors,
  7452. };
  7453. struct rpc_message msg = {
  7454. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
  7455. .rpc_argp = &args,
  7456. .rpc_resp = &res,
  7457. };
  7458. struct nfs4_call_sync_data data = {
  7459. .seq_server = NFS_SERVER(dir),
  7460. .seq_args = &args.seq_args,
  7461. .seq_res = &res.seq_res,
  7462. };
  7463. struct rpc_task_setup task_setup = {
  7464. .rpc_client = clnt,
  7465. .rpc_message = &msg,
  7466. .callback_ops = clp->cl_mvops->call_sync_ops,
  7467. .callback_data = &data,
  7468. .flags = RPC_TASK_NO_ROUND_ROBIN,
  7469. };
  7470. const struct cred *cred = NULL;
  7471. if (use_integrity) {
  7472. clnt = clp->cl_rpcclient;
  7473. task_setup.rpc_client = clnt;
  7474. cred = nfs4_get_clid_cred(clp);
  7475. msg.rpc_cred = cred;
  7476. }
  7477. dprintk("NFS call secinfo %s\n", name->name);
  7478. nfs4_state_protect(clp, NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
  7479. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
  7480. status = nfs4_call_sync_custom(&task_setup);
  7481. dprintk("NFS reply secinfo: %d\n", status);
  7482. put_cred(cred);
  7483. return status;
  7484. }
  7485. int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
  7486. struct nfs4_secinfo_flavors *flavors)
  7487. {
  7488. struct nfs4_exception exception = {
  7489. .interruptible = true,
  7490. };
  7491. int err;
  7492. do {
  7493. err = -NFS4ERR_WRONGSEC;
  7494. /* try to use integrity protection with machine cred */
  7495. if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
  7496. err = _nfs4_proc_secinfo(dir, name, flavors, true);
  7497. /*
  7498. * if unable to use integrity protection, or SECINFO with
  7499. * integrity protection returns NFS4ERR_WRONGSEC (which is
  7500. * disallowed by spec, but exists in deployed servers) use
  7501. * the current filesystem's rpc_client and the user cred.
  7502. */
  7503. if (err == -NFS4ERR_WRONGSEC)
  7504. err = _nfs4_proc_secinfo(dir, name, flavors, false);
  7505. trace_nfs4_secinfo(dir, name, err);
  7506. err = nfs4_handle_exception(NFS_SERVER(dir), err,
  7507. &exception);
  7508. } while (exception.retry);
  7509. return err;
  7510. }
  7511. #ifdef CONFIG_NFS_V4_1
  7512. /*
7513. * Check the exchange flags returned by the server for invalid flags: flags
7514. * outside the allowed mask, both the PNFS and NON_PNFS flags set, or none of
7515. * the NON_PNFS, PNFS, or DS flags set.
  7516. */
  7517. static int nfs4_check_cl_exchange_flags(u32 flags, u32 version)
  7518. {
  7519. if (version >= 2 && (flags & ~EXCHGID4_2_FLAG_MASK_R))
  7520. goto out_inval;
  7521. else if (version < 2 && (flags & ~EXCHGID4_FLAG_MASK_R))
  7522. goto out_inval;
  7523. if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
  7524. (flags & EXCHGID4_FLAG_USE_NON_PNFS))
  7525. goto out_inval;
  7526. if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
  7527. goto out_inval;
  7528. return NFS_OK;
  7529. out_inval:
  7530. return -NFS4ERR_INVAL;
  7531. }
  7532. static bool
  7533. nfs41_same_server_scope(struct nfs41_server_scope *a,
  7534. struct nfs41_server_scope *b)
  7535. {
  7536. if (a->server_scope_sz != b->server_scope_sz)
  7537. return false;
  7538. return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
  7539. }
  7540. static void
  7541. nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
  7542. {
  7543. struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
  7544. struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
  7545. struct nfs_client *clp = args->client;
  7546. switch (task->tk_status) {
  7547. case -NFS4ERR_BADSESSION:
  7548. case -NFS4ERR_DEADSESSION:
  7549. nfs4_schedule_session_recovery(clp->cl_session,
  7550. task->tk_status);
  7551. return;
  7552. }
  7553. if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
  7554. res->dir != NFS4_CDFS4_BOTH) {
  7555. rpc_task_close_connection(task);
  7556. if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
  7557. rpc_restart_call(task);
  7558. }
  7559. }
  7560. static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
  7561. .rpc_call_done = nfs4_bind_one_conn_to_session_done,
  7562. };
  7563. /*
  7564. * nfs4_proc_bind_one_conn_to_session()
  7565. *
  7566. * The 4.1 client currently uses the same TCP connection for the
  7567. * fore and backchannel.
  7568. */
  7569. static
  7570. int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
  7571. struct rpc_xprt *xprt,
  7572. struct nfs_client *clp,
  7573. const struct cred *cred)
  7574. {
  7575. int status;
  7576. struct nfs41_bind_conn_to_session_args args = {
  7577. .client = clp,
  7578. .dir = NFS4_CDFC4_FORE_OR_BOTH,
  7579. .retries = 0,
  7580. };
  7581. struct nfs41_bind_conn_to_session_res res;
  7582. struct rpc_message msg = {
  7583. .rpc_proc =
  7584. &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
  7585. .rpc_argp = &args,
  7586. .rpc_resp = &res,
  7587. .rpc_cred = cred,
  7588. };
  7589. struct rpc_task_setup task_setup_data = {
  7590. .rpc_client = clnt,
  7591. .rpc_xprt = xprt,
  7592. .callback_ops = &nfs4_bind_one_conn_to_session_ops,
  7593. .rpc_message = &msg,
  7594. .flags = RPC_TASK_TIMEOUT,
  7595. };
  7596. struct rpc_task *task;
  7597. nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
  7598. if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
  7599. args.dir = NFS4_CDFC4_FORE;
  7600. /* Do not set the backchannel flag unless this is clnt->cl_xprt */
  7601. if (xprt != rcu_access_pointer(clnt->cl_xprt))
  7602. args.dir = NFS4_CDFC4_FORE;
  7603. task = rpc_run_task(&task_setup_data);
  7604. if (!IS_ERR(task)) {
  7605. status = task->tk_status;
  7606. rpc_put_task(task);
  7607. } else
  7608. status = PTR_ERR(task);
  7609. trace_nfs4_bind_conn_to_session(clp, status);
  7610. if (status == 0) {
  7611. if (memcmp(res.sessionid.data,
  7612. clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
  7613. dprintk("NFS: %s: Session ID mismatch\n", __func__);
  7614. return -EIO;
  7615. }
  7616. if ((res.dir & args.dir) != res.dir || res.dir == 0) {
  7617. dprintk("NFS: %s: Unexpected direction from server\n",
  7618. __func__);
  7619. return -EIO;
  7620. }
  7621. if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
  7622. dprintk("NFS: %s: Server returned RDMA mode = true\n",
  7623. __func__);
  7624. return -EIO;
  7625. }
  7626. }
  7627. return status;
  7628. }
  7629. struct rpc_bind_conn_calldata {
  7630. struct nfs_client *clp;
  7631. const struct cred *cred;
  7632. };
  7633. static int
  7634. nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
  7635. struct rpc_xprt *xprt,
  7636. void *calldata)
  7637. {
  7638. struct rpc_bind_conn_calldata *p = calldata;
  7639. return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
  7640. }
  7641. int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, const struct cred *cred)
  7642. {
  7643. struct rpc_bind_conn_calldata data = {
  7644. .clp = clp,
  7645. .cred = cred,
  7646. };
  7647. return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
  7648. nfs4_proc_bind_conn_to_session_callback, &data);
  7649. }
  7650. /*
  7651. * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
  7652. * and operations we'd like to see to enable certain features in the allow map
  7653. */
  7654. static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
  7655. .how = SP4_MACH_CRED,
  7656. .enforce.u.words = {
  7657. [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
  7658. 1 << (OP_EXCHANGE_ID - 32) |
  7659. 1 << (OP_CREATE_SESSION - 32) |
  7660. 1 << (OP_DESTROY_SESSION - 32) |
  7661. 1 << (OP_DESTROY_CLIENTID - 32)
  7662. },
  7663. .allow.u.words = {
  7664. [0] = 1 << (OP_CLOSE) |
  7665. 1 << (OP_OPEN_DOWNGRADE) |
  7666. 1 << (OP_LOCKU) |
  7667. 1 << (OP_DELEGRETURN) |
  7668. 1 << (OP_COMMIT),
  7669. [1] = 1 << (OP_SECINFO - 32) |
  7670. 1 << (OP_SECINFO_NO_NAME - 32) |
  7671. 1 << (OP_LAYOUTRETURN - 32) |
  7672. 1 << (OP_TEST_STATEID - 32) |
  7673. 1 << (OP_FREE_STATEID - 32) |
  7674. 1 << (OP_WRITE - 32)
  7675. }
  7676. };
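The allow and enforce maps above pack one bit per NFSv4 operation into 32-bit words: operations 0-31 go in word [0] shifted by the op number itself, and operations 32-63 go in word [1] shifted by (op - 32). A small standalone sketch of that encoding; op_map_test() is a hypothetical helper, not a kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OP_EXCHANGE_ID 42                       /* RFC 5661 operation number */

/* Mirror the word/bit encoding used by the sp4 op maps above. */
static bool op_map_test(const uint32_t *words, unsigned int op)
{
        return (words[op / 32] >> (op % 32)) & 1;
}

int main(void)
{
        uint32_t words[2] = { 0, 1u << (OP_EXCHANGE_ID - 32) };

        printf("EXCHANGE_ID enforced: %d\n", op_map_test(words, OP_EXCHANGE_ID));
        return 0;
}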
  7677. /*
  7678. * Select the state protection mode for client `clp' given the server results
  7679. * from exchange_id in `sp'.
  7680. *
  7681. * Returns 0 on success, negative errno otherwise.
  7682. */
  7683. static int nfs4_sp4_select_mode(struct nfs_client *clp,
  7684. struct nfs41_state_protection *sp)
  7685. {
  7686. static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
  7687. [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
  7688. 1 << (OP_EXCHANGE_ID - 32) |
  7689. 1 << (OP_CREATE_SESSION - 32) |
  7690. 1 << (OP_DESTROY_SESSION - 32) |
  7691. 1 << (OP_DESTROY_CLIENTID - 32)
  7692. };
  7693. unsigned long flags = 0;
  7694. unsigned int i;
  7695. int ret = 0;
  7696. if (sp->how == SP4_MACH_CRED) {
  7697. /* Print state protect result */
  7698. dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
  7699. for (i = 0; i <= LAST_NFS4_OP; i++) {
  7700. if (test_bit(i, sp->enforce.u.longs))
  7701. dfprintk(MOUNT, " enforce op %d\n", i);
  7702. if (test_bit(i, sp->allow.u.longs))
  7703. dfprintk(MOUNT, " allow op %d\n", i);
  7704. }
  7705. /* make sure nothing is on enforce list that isn't supported */
  7706. for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
  7707. if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
  7708. dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
  7709. ret = -EINVAL;
  7710. goto out;
  7711. }
  7712. }
  7713. /*
  7714. * Minimal mode - state operations are allowed to use machine
  7715. * credential. Note this already happens by default, so the
  7716. * client doesn't have to do anything more than the negotiation.
  7717. *
  7718. * NOTE: we don't care if EXCHANGE_ID is in the list -
  7719. * we're already using the machine cred for exchange_id
  7720. * and will never use a different cred.
  7721. */
  7722. if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
  7723. test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
  7724. test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
  7725. test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
  7726. dfprintk(MOUNT, "sp4_mach_cred:\n");
  7727. dfprintk(MOUNT, " minimal mode enabled\n");
  7728. __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
  7729. } else {
  7730. dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
  7731. ret = -EINVAL;
  7732. goto out;
  7733. }
  7734. if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
  7735. test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
  7736. test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
  7737. test_bit(OP_LOCKU, sp->allow.u.longs)) {
  7738. dfprintk(MOUNT, " cleanup mode enabled\n");
  7739. __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
  7740. }
  7741. if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
  7742. dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
  7743. __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
  7744. }
  7745. if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
  7746. test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
  7747. dfprintk(MOUNT, " secinfo mode enabled\n");
  7748. __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
  7749. }
  7750. if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
  7751. test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
  7752. dfprintk(MOUNT, " stateid mode enabled\n");
  7753. __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
  7754. }
  7755. if (test_bit(OP_WRITE, sp->allow.u.longs)) {
  7756. dfprintk(MOUNT, " write mode enabled\n");
  7757. __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
  7758. }
  7759. if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
  7760. dfprintk(MOUNT, " commit mode enabled\n");
  7761. __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
  7762. }
  7763. }
  7764. out:
  7765. clp->cl_sp4_flags = flags;
  7766. return ret;
  7767. }
  7768. struct nfs41_exchange_id_data {
  7769. struct nfs41_exchange_id_res res;
  7770. struct nfs41_exchange_id_args args;
  7771. };
  7772. static void nfs4_exchange_id_release(void *data)
  7773. {
  7774. struct nfs41_exchange_id_data *cdata =
  7775. (struct nfs41_exchange_id_data *)data;
  7776. nfs_put_client(cdata->args.client);
  7777. kfree(cdata->res.impl_id);
  7778. kfree(cdata->res.server_scope);
  7779. kfree(cdata->res.server_owner);
  7780. kfree(cdata);
  7781. }
  7782. static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
  7783. .rpc_release = nfs4_exchange_id_release,
  7784. };
  7785. /*
7786. * nfs4_run_exchange_id()
7787. *
7788. * Set up and launch an asynchronous EXCHANGE_ID call.
  7789. */
  7790. static struct rpc_task *
  7791. nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
  7792. u32 sp4_how, struct rpc_xprt *xprt)
  7793. {
  7794. struct rpc_message msg = {
  7795. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
  7796. .rpc_cred = cred,
  7797. };
  7798. struct rpc_task_setup task_setup_data = {
  7799. .rpc_client = clp->cl_rpcclient,
  7800. .callback_ops = &nfs4_exchange_id_call_ops,
  7801. .rpc_message = &msg,
  7802. .flags = RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN,
  7803. };
  7804. struct nfs41_exchange_id_data *calldata;
  7805. int status;
  7806. if (!refcount_inc_not_zero(&clp->cl_count))
  7807. return ERR_PTR(-EIO);
  7808. status = -ENOMEM;
  7809. calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
  7810. if (!calldata)
  7811. goto out;
  7812. nfs4_init_boot_verifier(clp, &calldata->args.verifier);
  7813. status = nfs4_init_uniform_client_string(clp);
  7814. if (status)
  7815. goto out_calldata;
  7816. calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
  7817. GFP_NOFS);
  7818. status = -ENOMEM;
  7819. if (unlikely(calldata->res.server_owner == NULL))
  7820. goto out_calldata;
  7821. calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
  7822. GFP_NOFS);
  7823. if (unlikely(calldata->res.server_scope == NULL))
  7824. goto out_server_owner;
  7825. calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
  7826. if (unlikely(calldata->res.impl_id == NULL))
  7827. goto out_server_scope;
  7828. switch (sp4_how) {
  7829. case SP4_NONE:
  7830. calldata->args.state_protect.how = SP4_NONE;
  7831. break;
  7832. case SP4_MACH_CRED:
  7833. calldata->args.state_protect = nfs4_sp4_mach_cred_request;
  7834. break;
  7835. default:
  7836. /* unsupported! */
  7837. WARN_ON_ONCE(1);
  7838. status = -EINVAL;
  7839. goto out_impl_id;
  7840. }
  7841. if (xprt) {
  7842. task_setup_data.rpc_xprt = xprt;
  7843. task_setup_data.flags |= RPC_TASK_SOFTCONN;
  7844. memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
  7845. sizeof(calldata->args.verifier.data));
  7846. }
  7847. calldata->args.client = clp;
  7848. calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
  7849. EXCHGID4_FLAG_BIND_PRINC_STATEID;
  7850. #ifdef CONFIG_NFS_V4_1_MIGRATION
  7851. calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
  7852. #endif
  7853. if (test_bit(NFS_CS_DS, &clp->cl_flags))
  7854. calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
  7855. msg.rpc_argp = &calldata->args;
  7856. msg.rpc_resp = &calldata->res;
  7857. task_setup_data.callback_data = calldata;
  7858. return rpc_run_task(&task_setup_data);
  7859. out_impl_id:
  7860. kfree(calldata->res.impl_id);
  7861. out_server_scope:
  7862. kfree(calldata->res.server_scope);
  7863. out_server_owner:
  7864. kfree(calldata->res.server_owner);
  7865. out_calldata:
  7866. kfree(calldata);
  7867. out:
  7868. nfs_put_client(clp);
  7869. return ERR_PTR(status);
  7870. }
  7871. /*
  7872. * _nfs4_proc_exchange_id()
  7873. *
  7874. * Wrapper for EXCHANGE_ID operation.
  7875. */
  7876. static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred,
  7877. u32 sp4_how)
  7878. {
  7879. struct rpc_task *task;
  7880. struct nfs41_exchange_id_args *argp;
  7881. struct nfs41_exchange_id_res *resp;
  7882. unsigned long now = jiffies;
  7883. int status;
  7884. task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
  7885. if (IS_ERR(task))
  7886. return PTR_ERR(task);
  7887. argp = task->tk_msg.rpc_argp;
  7888. resp = task->tk_msg.rpc_resp;
  7889. status = task->tk_status;
  7890. if (status != 0)
  7891. goto out;
  7892. status = nfs4_check_cl_exchange_flags(resp->flags,
  7893. clp->cl_mvops->minor_version);
  7894. if (status != 0)
  7895. goto out;
  7896. status = nfs4_sp4_select_mode(clp, &resp->state_protect);
  7897. if (status != 0)
  7898. goto out;
  7899. do_renew_lease(clp, now);
  7900. clp->cl_clientid = resp->clientid;
  7901. clp->cl_exchange_flags = resp->flags;
  7902. clp->cl_seqid = resp->seqid;
  7903. /* Client ID is not confirmed */
  7904. if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
  7905. clear_bit(NFS4_SESSION_ESTABLISHED,
  7906. &clp->cl_session->session_state);
  7907. if (clp->cl_serverscope != NULL &&
  7908. !nfs41_same_server_scope(clp->cl_serverscope,
  7909. resp->server_scope)) {
  7910. dprintk("%s: server_scope mismatch detected\n",
  7911. __func__);
  7912. set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
  7913. }
  7914. swap(clp->cl_serverowner, resp->server_owner);
  7915. swap(clp->cl_serverscope, resp->server_scope);
  7916. swap(clp->cl_implid, resp->impl_id);
7917. /* Save the EXCHANGE_ID verifier for session trunking tests */
  7918. memcpy(clp->cl_confirm.data, argp->verifier.data,
  7919. sizeof(clp->cl_confirm.data));
  7920. out:
  7921. trace_nfs4_exchange_id(clp, status);
  7922. rpc_put_task(task);
  7923. return status;
  7924. }
  7925. /*
  7926. * nfs4_proc_exchange_id()
  7927. *
  7928. * Returns zero, a negative errno, or a negative NFS4ERR status code.
  7929. *
  7930. * Since the clientid has expired, all compounds using sessions
  7931. * associated with the stale clientid will be returning
  7932. * NFS4ERR_BADSESSION in the sequence operation, and will therefore
  7933. * be in some phase of session reset.
  7934. *
  7935. * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
  7936. */
  7937. int nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cred)
  7938. {
  7939. rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
  7940. int status;
  7941. /* try SP4_MACH_CRED if krb5i/p */
  7942. if (authflavor == RPC_AUTH_GSS_KRB5I ||
  7943. authflavor == RPC_AUTH_GSS_KRB5P) {
  7944. status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
  7945. if (!status)
  7946. return 0;
  7947. }
  7948. /* try SP4_NONE */
  7949. return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
  7950. }
  7951. /**
  7952. * nfs4_test_session_trunk
  7953. *
  7954. * This is an add_xprt_test() test function called from
  7955. * rpc_clnt_setup_test_and_add_xprt.
  7956. *
7957. * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
7958. * and is dereferenced in nfs4_exchange_id_release
  7959. *
  7960. * Upon success, add the new transport to the rpc_clnt
  7961. *
  7962. * @clnt: struct rpc_clnt to get new transport
  7963. * @xprt: the rpc_xprt to test
  7964. * @data: call data for _nfs4_proc_exchange_id.
  7965. */
  7966. void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
  7967. void *data)
  7968. {
  7969. struct nfs4_add_xprt_data *adata = data;
  7970. struct rpc_task *task;
  7971. int status;
  7972. u32 sp4_how;
  7973. dprintk("--> %s try %s\n", __func__,
  7974. xprt->address_strings[RPC_DISPLAY_ADDR]);
  7975. sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
  7976. try_again:
  7977. /* Test connection for session trunking. Async exchange_id call */
  7978. task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
  7979. if (IS_ERR(task))
  7980. return;
  7981. status = task->tk_status;
  7982. if (status == 0)
  7983. status = nfs4_detect_session_trunking(adata->clp,
  7984. task->tk_msg.rpc_resp, xprt);
  7985. if (status == 0)
  7986. rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
  7987. else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
  7988. (struct sockaddr *)&xprt->addr))
  7989. rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
  7990. rpc_put_task(task);
  7991. if (status == -NFS4ERR_DELAY) {
  7992. ssleep(1);
  7993. goto try_again;
  7994. }
  7995. }
  7996. EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
  7997. static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
  7998. const struct cred *cred)
  7999. {
  8000. struct rpc_message msg = {
  8001. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
  8002. .rpc_argp = clp,
  8003. .rpc_cred = cred,
  8004. };
  8005. int status;
  8006. status = rpc_call_sync(clp->cl_rpcclient, &msg,
  8007. RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
  8008. trace_nfs4_destroy_clientid(clp, status);
  8009. if (status)
  8010. dprintk("NFS: Got error %d from the server %s on "
  8011. "DESTROY_CLIENTID.", status, clp->cl_hostname);
  8012. return status;
  8013. }
  8014. static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
  8015. const struct cred *cred)
  8016. {
  8017. unsigned int loop;
  8018. int ret;
  8019. for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
  8020. ret = _nfs4_proc_destroy_clientid(clp, cred);
  8021. switch (ret) {
  8022. case -NFS4ERR_DELAY:
  8023. case -NFS4ERR_CLIENTID_BUSY:
  8024. ssleep(1);
  8025. break;
  8026. default:
  8027. return ret;
  8028. }
  8029. }
  8030. return 0;
  8031. }
  8032. int nfs4_destroy_clientid(struct nfs_client *clp)
  8033. {
  8034. const struct cred *cred;
  8035. int ret = 0;
  8036. if (clp->cl_mvops->minor_version < 1)
  8037. goto out;
  8038. if (clp->cl_exchange_flags == 0)
  8039. goto out;
  8040. if (clp->cl_preserve_clid)
  8041. goto out;
  8042. cred = nfs4_get_clid_cred(clp);
  8043. ret = nfs4_proc_destroy_clientid(clp, cred);
  8044. put_cred(cred);
  8045. switch (ret) {
  8046. case 0:
  8047. case -NFS4ERR_STALE_CLIENTID:
  8048. clp->cl_exchange_flags = 0;
  8049. }
  8050. out:
  8051. return ret;
  8052. }
  8053. #endif /* CONFIG_NFS_V4_1 */
  8054. struct nfs4_get_lease_time_data {
  8055. struct nfs4_get_lease_time_args *args;
  8056. struct nfs4_get_lease_time_res *res;
  8057. struct nfs_client *clp;
  8058. };
  8059. static void nfs4_get_lease_time_prepare(struct rpc_task *task,
  8060. void *calldata)
  8061. {
  8062. struct nfs4_get_lease_time_data *data =
  8063. (struct nfs4_get_lease_time_data *)calldata;
8064. /* just set up the sequence; do not trigger session recovery
8065. since we're invoked within one */
  8066. nfs4_setup_sequence(data->clp,
  8067. &data->args->la_seq_args,
  8068. &data->res->lr_seq_res,
  8069. task);
  8070. }
  8071. /*
  8072. * Called from nfs4_state_manager thread for session setup, so don't recover
  8073. * from sequence operation or clientid errors.
  8074. */
  8075. static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
  8076. {
  8077. struct nfs4_get_lease_time_data *data =
  8078. (struct nfs4_get_lease_time_data *)calldata;
  8079. if (!nfs4_sequence_done(task, &data->res->lr_seq_res))
  8080. return;
  8081. switch (task->tk_status) {
  8082. case -NFS4ERR_DELAY:
  8083. case -NFS4ERR_GRACE:
  8084. rpc_delay(task, NFS4_POLL_RETRY_MIN);
  8085. task->tk_status = 0;
  8086. fallthrough;
  8087. case -NFS4ERR_RETRY_UNCACHED_REP:
  8088. rpc_restart_call_prepare(task);
  8089. return;
  8090. }
  8091. }
  8092. static const struct rpc_call_ops nfs4_get_lease_time_ops = {
  8093. .rpc_call_prepare = nfs4_get_lease_time_prepare,
  8094. .rpc_call_done = nfs4_get_lease_time_done,
  8095. };
  8096. int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
  8097. {
  8098. struct nfs4_get_lease_time_args args;
  8099. struct nfs4_get_lease_time_res res = {
  8100. .lr_fsinfo = fsinfo,
  8101. };
  8102. struct nfs4_get_lease_time_data data = {
  8103. .args = &args,
  8104. .res = &res,
  8105. .clp = clp,
  8106. };
  8107. struct rpc_message msg = {
  8108. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
  8109. .rpc_argp = &args,
  8110. .rpc_resp = &res,
  8111. };
  8112. struct rpc_task_setup task_setup = {
  8113. .rpc_client = clp->cl_rpcclient,
  8114. .rpc_message = &msg,
  8115. .callback_ops = &nfs4_get_lease_time_ops,
  8116. .callback_data = &data,
  8117. .flags = RPC_TASK_TIMEOUT,
  8118. };
  8119. nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0, 1);
  8120. return nfs4_call_sync_custom(&task_setup);
  8121. }
  8122. #ifdef CONFIG_NFS_V4_1
  8123. /*
8124. Initialize the values to be used by the client in CREATE_SESSION.
  8125. * If nfs4_init_session set the fore channel request and response sizes,
  8126. * use them.
  8127. *
  8128. * Set the back channel max_resp_sz_cached to zero to force the client to
  8129. * always set csa_cachethis to FALSE because the current implementation
  8130. * of the back channel DRC only supports caching the CB_SEQUENCE operation.
  8131. */
  8132. static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
  8133. struct rpc_clnt *clnt)
  8134. {
  8135. unsigned int max_rqst_sz, max_resp_sz;
  8136. unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
  8137. unsigned int max_bc_slots = rpc_num_bc_slots(clnt);
  8138. max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
  8139. max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
  8140. /* Fore channel attributes */
  8141. args->fc_attrs.max_rqst_sz = max_rqst_sz;
  8142. args->fc_attrs.max_resp_sz = max_resp_sz;
  8143. args->fc_attrs.max_ops = NFS4_MAX_OPS;
  8144. args->fc_attrs.max_reqs = max_session_slots;
  8145. dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
  8146. "max_ops=%u max_reqs=%u\n",
  8147. __func__,
  8148. args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
  8149. args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
  8150. /* Back channel attributes */
  8151. args->bc_attrs.max_rqst_sz = max_bc_payload;
  8152. args->bc_attrs.max_resp_sz = max_bc_payload;
  8153. args->bc_attrs.max_resp_sz_cached = 0;
  8154. args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
  8155. args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
  8156. if (args->bc_attrs.max_reqs > max_bc_slots)
  8157. args->bc_attrs.max_reqs = max_bc_slots;
  8158. dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
  8159. "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
  8160. __func__,
  8161. args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
  8162. args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
  8163. args->bc_attrs.max_reqs);
  8164. }
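/*
Descriptive note (added): sanity-check the fore channel attributes returned in
the CREATE_SESSION reply against what we requested: reject a reply whose
max_resp_sz exceeds what we asked for or whose max_ops is below the minimum we
need, require at least one slot, and clamp max_reqs to NFS4_MAX_SLOT_TABLE.
*/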
  8165. static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
  8166. struct nfs41_create_session_res *res)
  8167. {
  8168. struct nfs4_channel_attrs *sent = &args->fc_attrs;
  8169. struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
  8170. if (rcvd->max_resp_sz > sent->max_resp_sz)
  8171. return -EINVAL;
  8172. /*
  8173. * Our requested max_ops is the minimum we need; we're not
  8174. * prepared to break up compounds into smaller pieces than that.
  8175. * So, no point even trying to continue if the server won't
  8176. * cooperate:
  8177. */
  8178. if (rcvd->max_ops < sent->max_ops)
  8179. return -EINVAL;
  8180. if (rcvd->max_reqs == 0)
  8181. return -EINVAL;
  8182. if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
  8183. rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
  8184. return 0;
  8185. }
  8186. static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
  8187. struct nfs41_create_session_res *res)
  8188. {
  8189. struct nfs4_channel_attrs *sent = &args->bc_attrs;
  8190. struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
  8191. if (!(res->flags & SESSION4_BACK_CHAN))
  8192. goto out;
  8193. if (rcvd->max_rqst_sz > sent->max_rqst_sz)
  8194. return -EINVAL;
  8195. if (rcvd->max_resp_sz < sent->max_resp_sz)
  8196. return -EINVAL;
  8197. if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
  8198. return -EINVAL;
  8199. if (rcvd->max_ops > sent->max_ops)
  8200. return -EINVAL;
  8201. if (rcvd->max_reqs > sent->max_reqs)
  8202. return -EINVAL;
  8203. out:
  8204. return 0;
  8205. }
  8206. static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
  8207. struct nfs41_create_session_res *res)
  8208. {
  8209. int ret;
  8210. ret = nfs4_verify_fore_channel_attrs(args, res);
  8211. if (ret)
  8212. return ret;
  8213. return nfs4_verify_back_channel_attrs(args, res);
  8214. }
  8215. static void nfs4_update_session(struct nfs4_session *session,
  8216. struct nfs41_create_session_res *res)
  8217. {
  8218. nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
  8219. /* Mark client id and session as being confirmed */
  8220. session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
  8221. set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
  8222. session->flags = res->flags;
  8223. memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
  8224. if (res->flags & SESSION4_BACK_CHAN)
  8225. memcpy(&session->bc_attrs, &res->bc_attrs,
  8226. sizeof(session->bc_attrs));
  8227. }
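/*
Descriptive note (added): send a single CREATE_SESSION request: fill in the
requested channel attributes, make the RPC, bump cl_seqid when appropriate,
and on success verify the negotiated attributes and update the nfs4_session.
*/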
  8228. static int _nfs4_proc_create_session(struct nfs_client *clp,
  8229. const struct cred *cred)
  8230. {
  8231. struct nfs4_session *session = clp->cl_session;
  8232. struct nfs41_create_session_args args = {
  8233. .client = clp,
  8234. .clientid = clp->cl_clientid,
  8235. .seqid = clp->cl_seqid,
  8236. .cb_program = NFS4_CALLBACK,
  8237. };
  8238. struct nfs41_create_session_res res;
  8239. struct rpc_message msg = {
  8240. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
  8241. .rpc_argp = &args,
  8242. .rpc_resp = &res,
  8243. .rpc_cred = cred,
  8244. };
  8245. int status;
  8246. nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
  8247. args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
  8248. status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
  8249. RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
  8250. trace_nfs4_create_session(clp, status);
  8251. switch (status) {
  8252. case -NFS4ERR_STALE_CLIENTID:
  8253. case -NFS4ERR_DELAY:
  8254. case -ETIMEDOUT:
  8255. case -EACCES:
  8256. case -EAGAIN:
  8257. goto out;
  8258. }
  8259. clp->cl_seqid++;
  8260. if (!status) {
  8261. /* Verify the session's negotiated channel_attrs values */
  8262. status = nfs4_verify_channel_attrs(&args, &res);
  8263. /* Increment the clientid slot sequence id */
  8264. if (status)
  8265. goto out;
  8266. nfs4_update_session(session, &res);
  8267. }
  8268. out:
  8269. return status;
  8270. }
  8271. /*
  8272. * Issues a CREATE_SESSION operation to the server.
8273. It is the responsibility of the caller to verify that the session has
8274. expired before calling this routine.
  8275. */
  8276. int nfs4_proc_create_session(struct nfs_client *clp, const struct cred *cred)
  8277. {
  8278. int status;
  8279. unsigned *ptr;
  8280. struct nfs4_session *session = clp->cl_session;
  8281. struct nfs4_add_xprt_data xprtdata = {
  8282. .clp = clp,
  8283. };
  8284. struct rpc_add_xprt_test rpcdata = {
  8285. .add_xprt_test = clp->cl_mvops->session_trunk,
  8286. .data = &xprtdata,
  8287. };
  8288. dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
  8289. status = _nfs4_proc_create_session(clp, cred);
  8290. if (status)
  8291. goto out;
  8292. /* Init or reset the session slot tables */
  8293. status = nfs4_setup_session_slot_tables(session);
  8294. dprintk("slot table setup returned %d\n", status);
  8295. if (status)
  8296. goto out;
  8297. ptr = (unsigned *)&session->sess_id.data[0];
  8298. dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
  8299. clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
  8300. rpc_clnt_probe_trunked_xprts(clp->cl_rpcclient, &rpcdata);
  8301. out:
  8302. return status;
  8303. }
  8304. /*
  8305. * Issue the over-the-wire RPC DESTROY_SESSION.
  8306. * The caller must serialize access to this routine.
  8307. */
  8308. int nfs4_proc_destroy_session(struct nfs4_session *session,
  8309. const struct cred *cred)
  8310. {
  8311. struct rpc_message msg = {
  8312. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
  8313. .rpc_argp = session,
  8314. .rpc_cred = cred,
  8315. };
  8316. int status = 0;
  8317. /* session is still being setup */
  8318. if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
  8319. return 0;
  8320. status = rpc_call_sync(session->clp->cl_rpcclient, &msg,
  8321. RPC_TASK_TIMEOUT | RPC_TASK_NO_ROUND_ROBIN);
  8322. trace_nfs4_destroy_session(session->clp, status);
  8323. if (status)
  8324. dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
  8325. "Session has been destroyed regardless...\n", status);
  8326. rpc_clnt_manage_trunked_xprts(session->clp->cl_rpcclient);
  8327. return status;
  8328. }
  8329. /*
  8330. * Renew the cl_session lease.
  8331. */
  8332. struct nfs4_sequence_data {
  8333. struct nfs_client *clp;
  8334. struct nfs4_sequence_args args;
  8335. struct nfs4_sequence_res res;
  8336. };
  8337. static void nfs41_sequence_release(void *data)
  8338. {
  8339. struct nfs4_sequence_data *calldata = data;
  8340. struct nfs_client *clp = calldata->clp;
  8341. if (refcount_read(&clp->cl_count) > 1)
  8342. nfs4_schedule_state_renewal(clp);
  8343. nfs_put_client(clp);
  8344. kfree(calldata);
  8345. }
  8346. static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
  8347. {
  8348. switch(task->tk_status) {
  8349. case -NFS4ERR_DELAY:
  8350. rpc_delay(task, NFS4_POLL_RETRY_MAX);
  8351. return -EAGAIN;
  8352. default:
  8353. nfs4_schedule_lease_recovery(clp);
  8354. }
  8355. return 0;
  8356. }
  8357. static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
  8358. {
  8359. struct nfs4_sequence_data *calldata = data;
  8360. struct nfs_client *clp = calldata->clp;
  8361. if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
  8362. return;
  8363. trace_nfs4_sequence(clp, task->tk_status);
  8364. if (task->tk_status < 0) {
  8365. dprintk("%s ERROR %d\n", __func__, task->tk_status);
  8366. if (refcount_read(&clp->cl_count) == 1)
  8367. return;
  8368. if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
  8369. rpc_restart_call_prepare(task);
  8370. return;
  8371. }
  8372. }
  8373. dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
  8374. }
  8375. static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
  8376. {
  8377. struct nfs4_sequence_data *calldata = data;
  8378. struct nfs_client *clp = calldata->clp;
  8379. struct nfs4_sequence_args *args;
  8380. struct nfs4_sequence_res *res;
  8381. args = task->tk_msg.rpc_argp;
  8382. res = task->tk_msg.rpc_resp;
  8383. nfs4_setup_sequence(clp, args, res, task);
  8384. }
  8385. static const struct rpc_call_ops nfs41_sequence_ops = {
  8386. .rpc_call_done = nfs41_sequence_call_done,
  8387. .rpc_call_prepare = nfs41_sequence_prepare,
  8388. .rpc_release = nfs41_sequence_release,
  8389. };
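/*
Descriptive note (added): start an asynchronous SEQUENCE RPC to renew the
lease. Takes a reference on the nfs_client (dropped again in
nfs41_sequence_release) and returns the running rpc_task, or an ERR_PTR on
failure.
*/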
  8390. static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
  8391. const struct cred *cred,
  8392. struct nfs4_slot *slot,
  8393. bool is_privileged)
  8394. {
  8395. struct nfs4_sequence_data *calldata;
  8396. struct rpc_message msg = {
  8397. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
  8398. .rpc_cred = cred,
  8399. };
  8400. struct rpc_task_setup task_setup_data = {
  8401. .rpc_client = clp->cl_rpcclient,
  8402. .rpc_message = &msg,
  8403. .callback_ops = &nfs41_sequence_ops,
  8404. .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT | RPC_TASK_MOVEABLE,
  8405. };
  8406. struct rpc_task *ret;
  8407. ret = ERR_PTR(-EIO);
  8408. if (!refcount_inc_not_zero(&clp->cl_count))
  8409. goto out_err;
  8410. ret = ERR_PTR(-ENOMEM);
  8411. calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
  8412. if (calldata == NULL)
  8413. goto out_put_clp;
  8414. nfs4_init_sequence(&calldata->args, &calldata->res, 0, is_privileged);
  8415. nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
  8416. msg.rpc_argp = &calldata->args;
  8417. msg.rpc_resp = &calldata->res;
  8418. calldata->clp = clp;
  8419. task_setup_data.callback_data = calldata;
  8420. ret = rpc_run_task(&task_setup_data);
  8421. if (IS_ERR(ret))
  8422. goto out_err;
  8423. return ret;
  8424. out_put_clp:
  8425. nfs_put_client(clp);
  8426. out_err:
  8427. nfs41_release_slot(slot);
  8428. return ret;
  8429. }
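/*
Descriptive note (added): lease renewal hook for the v4.1 state renewal ops;
only send a SEQUENCE when the renewal was triggered by a lease timeout, and
do not wait for the reply.
*/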
  8430. static int nfs41_proc_async_sequence(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
  8431. {
  8432. struct rpc_task *task;
  8433. int ret = 0;
  8434. if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
  8435. return -EAGAIN;
  8436. task = _nfs41_proc_sequence(clp, cred, NULL, false);
  8437. if (IS_ERR(task))
  8438. ret = PTR_ERR(task);
  8439. else
  8440. rpc_put_task_async(task);
  8441. dprintk("<-- %s status=%d\n", __func__, ret);
  8442. return ret;
  8443. }
  8444. static int nfs4_proc_sequence(struct nfs_client *clp, const struct cred *cred)
  8445. {
  8446. struct rpc_task *task;
  8447. int ret;
  8448. task = _nfs41_proc_sequence(clp, cred, NULL, true);
  8449. if (IS_ERR(task)) {
  8450. ret = PTR_ERR(task);
  8451. goto out;
  8452. }
  8453. ret = rpc_wait_for_completion_task(task);
  8454. if (!ret)
  8455. ret = task->tk_status;
  8456. rpc_put_task(task);
  8457. out:
  8458. dprintk("<-- %s status=%d\n", __func__, ret);
  8459. return ret;
  8460. }
  8461. struct nfs4_reclaim_complete_data {
  8462. struct nfs_client *clp;
  8463. struct nfs41_reclaim_complete_args arg;
  8464. struct nfs41_reclaim_complete_res res;
  8465. };
  8466. static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
  8467. {
  8468. struct nfs4_reclaim_complete_data *calldata = data;
  8469. nfs4_setup_sequence(calldata->clp,
  8470. &calldata->arg.seq_args,
  8471. &calldata->res.seq_res,
  8472. task);
  8473. }
  8474. static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
  8475. {
  8476. switch(task->tk_status) {
  8477. case 0:
  8478. wake_up_all(&clp->cl_lock_waitq);
  8479. fallthrough;
  8480. case -NFS4ERR_COMPLETE_ALREADY:
  8481. case -NFS4ERR_WRONG_CRED: /* What to do here? */
  8482. break;
  8483. case -NFS4ERR_DELAY:
  8484. rpc_delay(task, NFS4_POLL_RETRY_MAX);
  8485. fallthrough;
  8486. case -NFS4ERR_RETRY_UNCACHED_REP:
  8487. case -EACCES:
  8488. dprintk("%s: failed to reclaim complete error %d for server %s, retrying\n",
  8489. __func__, task->tk_status, clp->cl_hostname);
  8490. return -EAGAIN;
  8491. case -NFS4ERR_BADSESSION:
  8492. case -NFS4ERR_DEADSESSION:
  8493. case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
  8494. break;
  8495. default:
  8496. nfs4_schedule_lease_recovery(clp);
  8497. }
  8498. return 0;
  8499. }
  8500. static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
  8501. {
  8502. struct nfs4_reclaim_complete_data *calldata = data;
  8503. struct nfs_client *clp = calldata->clp;
  8504. struct nfs4_sequence_res *res = &calldata->res.seq_res;
  8505. if (!nfs41_sequence_done(task, res))
  8506. return;
  8507. trace_nfs4_reclaim_complete(clp, task->tk_status);
  8508. if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
  8509. rpc_restart_call_prepare(task);
  8510. return;
  8511. }
  8512. }
  8513. static void nfs4_free_reclaim_complete_data(void *data)
  8514. {
  8515. struct nfs4_reclaim_complete_data *calldata = data;
  8516. kfree(calldata);
  8517. }
  8518. static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
  8519. .rpc_call_prepare = nfs4_reclaim_complete_prepare,
  8520. .rpc_call_done = nfs4_reclaim_complete_done,
  8521. .rpc_release = nfs4_free_reclaim_complete_data,
  8522. };
  8523. /*
  8524. * Issue a global reclaim complete.
  8525. */
  8526. static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
  8527. const struct cred *cred)
  8528. {
  8529. struct nfs4_reclaim_complete_data *calldata;
  8530. struct rpc_message msg = {
  8531. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
  8532. .rpc_cred = cred,
  8533. };
  8534. struct rpc_task_setup task_setup_data = {
  8535. .rpc_client = clp->cl_rpcclient,
  8536. .rpc_message = &msg,
  8537. .callback_ops = &nfs4_reclaim_complete_call_ops,
  8538. .flags = RPC_TASK_NO_ROUND_ROBIN,
  8539. };
  8540. int status = -ENOMEM;
  8541. calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
  8542. if (calldata == NULL)
  8543. goto out;
  8544. calldata->clp = clp;
  8545. calldata->arg.one_fs = 0;
  8546. nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0, 1);
  8547. msg.rpc_argp = &calldata->arg;
  8548. msg.rpc_resp = &calldata->res;
  8549. task_setup_data.callback_data = calldata;
  8550. status = nfs4_call_sync_custom(&task_setup_data);
  8551. out:
  8552. dprintk("<-- %s status=%d\n", __func__, status);
  8553. return status;
  8554. }
  8555. static void
  8556. nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
  8557. {
  8558. struct nfs4_layoutget *lgp = calldata;
  8559. struct nfs_server *server = NFS_SERVER(lgp->args.inode);
  8560. nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
  8561. &lgp->res.seq_res, task);
  8562. }
  8563. static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
  8564. {
  8565. struct nfs4_layoutget *lgp = calldata;
  8566. nfs41_sequence_process(task, &lgp->res.seq_res);
  8567. }
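/*
Descriptive note (added): map an NFS4ERR_* result from LAYOUTGET onto a local
error code or a retry decision, recovering the open/layout stateid where
needed.
*/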
  8568. static int
  8569. nfs4_layoutget_handle_exception(struct rpc_task *task,
  8570. struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
  8571. {
  8572. struct inode *inode = lgp->args.inode;
  8573. struct nfs_server *server = NFS_SERVER(inode);
  8574. struct pnfs_layout_hdr *lo = lgp->lo;
  8575. int nfs4err = task->tk_status;
  8576. int err, status = 0;
  8577. LIST_HEAD(head);
  8578. dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
  8579. nfs4_sequence_free_slot(&lgp->res.seq_res);
  8580. switch (nfs4err) {
  8581. case 0:
  8582. goto out;
  8583. /*
8584. NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pNFS
8585. on this file. Set tk_status to -ENODATA to tell the upper layer to
8586. retry the I/O in-band through the MDS.
  8587. */
  8588. case -NFS4ERR_LAYOUTUNAVAILABLE:
  8589. status = -ENODATA;
  8590. goto out;
  8591. /*
  8592. * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
  8593. * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
  8594. */
  8595. case -NFS4ERR_BADLAYOUT:
  8596. status = -EOVERFLOW;
  8597. goto out;
  8598. /*
  8599. * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
  8600. * (or clients) writing to the same RAID stripe except when
  8601. * the minlength argument is 0 (see RFC5661 section 18.43.3).
  8602. *
  8603. * Treat it like we would RECALLCONFLICT -- we retry for a little
  8604. * while, and then eventually give up.
  8605. */
  8606. case -NFS4ERR_LAYOUTTRYLATER:
  8607. if (lgp->args.minlength == 0) {
  8608. status = -EOVERFLOW;
  8609. goto out;
  8610. }
  8611. status = -EBUSY;
  8612. break;
  8613. case -NFS4ERR_RECALLCONFLICT:
  8614. status = -ERECALLCONFLICT;
  8615. break;
  8616. case -NFS4ERR_DELEG_REVOKED:
  8617. case -NFS4ERR_ADMIN_REVOKED:
  8618. case -NFS4ERR_EXPIRED:
  8619. case -NFS4ERR_BAD_STATEID:
  8620. exception->timeout = 0;
  8621. spin_lock(&inode->i_lock);
  8622. /* If the open stateid was bad, then recover it. */
  8623. if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
  8624. !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
  8625. spin_unlock(&inode->i_lock);
  8626. exception->state = lgp->args.ctx->state;
  8627. exception->stateid = &lgp->args.stateid;
  8628. break;
  8629. }
  8630. /*
  8631. * Mark the bad layout state as invalid, then retry
  8632. */
  8633. pnfs_mark_layout_stateid_invalid(lo, &head);
  8634. spin_unlock(&inode->i_lock);
  8635. nfs_commit_inode(inode, 0);
  8636. pnfs_free_lseg_list(&head);
  8637. status = -EAGAIN;
  8638. goto out;
  8639. }
  8640. err = nfs4_handle_exception(server, nfs4err, exception);
  8641. if (!status) {
  8642. if (exception->retry)
  8643. status = -EAGAIN;
  8644. else
  8645. status = err;
  8646. }
  8647. out:
  8648. return status;
  8649. }
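/* Number of pages needed to hold the session's largest possible reply */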
  8650. size_t max_response_pages(struct nfs_server *server)
  8651. {
  8652. u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
  8653. return nfs_page_array_len(0, max_resp_sz);
  8654. }
  8655. static void nfs4_layoutget_release(void *calldata)
  8656. {
  8657. struct nfs4_layoutget *lgp = calldata;
  8658. nfs4_sequence_free_slot(&lgp->res.seq_res);
  8659. pnfs_layoutget_free(lgp);
  8660. }
  8661. static const struct rpc_call_ops nfs4_layoutget_call_ops = {
  8662. .rpc_call_prepare = nfs4_layoutget_prepare,
  8663. .rpc_call_done = nfs4_layoutget_done,
  8664. .rpc_release = nfs4_layoutget_release,
  8665. };
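/*
Descriptive note (added): run LAYOUTGET and wait for it to complete. On
success the reply is turned into a layout segment via pnfs_layout_process();
errors are translated by nfs4_layoutget_handle_exception() and *timeout is
updated so the caller can back off before retrying.
*/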
  8666. struct pnfs_layout_segment *
  8667. nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
  8668. {
  8669. struct inode *inode = lgp->args.inode;
  8670. struct nfs_server *server = NFS_SERVER(inode);
  8671. struct rpc_task *task;
  8672. struct rpc_message msg = {
  8673. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
  8674. .rpc_argp = &lgp->args,
  8675. .rpc_resp = &lgp->res,
  8676. .rpc_cred = lgp->cred,
  8677. };
  8678. struct rpc_task_setup task_setup_data = {
  8679. .rpc_client = server->client,
  8680. .rpc_message = &msg,
  8681. .callback_ops = &nfs4_layoutget_call_ops,
  8682. .callback_data = lgp,
  8683. .flags = RPC_TASK_ASYNC | RPC_TASK_CRED_NOREF |
  8684. RPC_TASK_MOVEABLE,
  8685. };
  8686. struct pnfs_layout_segment *lseg = NULL;
  8687. struct nfs4_exception exception = {
  8688. .inode = inode,
  8689. .timeout = *timeout,
  8690. };
  8691. int status = 0;
  8692. nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
  8693. task = rpc_run_task(&task_setup_data);
  8694. if (IS_ERR(task))
  8695. return ERR_CAST(task);
  8696. status = rpc_wait_for_completion_task(task);
  8697. if (status != 0)
  8698. goto out;
  8699. if (task->tk_status < 0) {
  8700. status = nfs4_layoutget_handle_exception(task, lgp, &exception);
  8701. *timeout = exception.timeout;
  8702. } else if (lgp->res.layoutp->len == 0) {
  8703. status = -EAGAIN;
  8704. *timeout = nfs4_update_delay(&exception.timeout);
  8705. } else
  8706. lseg = pnfs_layout_process(lgp);
  8707. out:
  8708. trace_nfs4_layoutget(lgp->args.ctx,
  8709. &lgp->args.range,
  8710. &lgp->res.range,
  8711. &lgp->res.stateid,
  8712. status);
  8713. rpc_put_task(task);
  8714. dprintk("<-- %s status=%d\n", __func__, status);
  8715. if (status)
  8716. return ERR_PTR(status);
  8717. return lseg;
  8718. }
  8719. static void
  8720. nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
  8721. {
  8722. struct nfs4_layoutreturn *lrp = calldata;
  8723. nfs4_setup_sequence(lrp->clp,
  8724. &lrp->args.seq_args,
  8725. &lrp->res.seq_res,
  8726. task);
  8727. if (!pnfs_layout_is_valid(lrp->args.layout))
  8728. rpc_exit(task, 0);
  8729. }
  8730. static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
  8731. {
  8732. struct nfs4_layoutreturn *lrp = calldata;
  8733. struct nfs_server *server;
  8734. if (!nfs41_sequence_process(task, &lrp->res.seq_res))
  8735. return;
  8736. /*
8737. Was there an RPC-level error? Assume the call succeeded,
8738. and that we need to release the layout.
  8739. */
  8740. if (task->tk_rpc_status != 0 && RPC_WAS_SENT(task)) {
  8741. lrp->res.lrs_present = 0;
  8742. return;
  8743. }
  8744. server = NFS_SERVER(lrp->args.inode);
  8745. switch (task->tk_status) {
  8746. case -NFS4ERR_OLD_STATEID:
  8747. if (nfs4_layout_refresh_old_stateid(&lrp->args.stateid,
  8748. &lrp->args.range,
  8749. lrp->args.inode))
  8750. goto out_restart;
  8751. fallthrough;
  8752. default:
  8753. task->tk_status = 0;
  8754. fallthrough;
  8755. case 0:
  8756. break;
  8757. case -NFS4ERR_DELAY:
  8758. if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
  8759. break;
  8760. goto out_restart;
  8761. }
  8762. return;
  8763. out_restart:
  8764. task->tk_status = 0;
  8765. nfs4_sequence_free_slot(&lrp->res.seq_res);
  8766. rpc_restart_call_prepare(task);
  8767. }
  8768. static void nfs4_layoutreturn_release(void *calldata)
  8769. {
  8770. struct nfs4_layoutreturn *lrp = calldata;
  8771. struct pnfs_layout_hdr *lo = lrp->args.layout;
  8772. pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
  8773. lrp->res.lrs_present ? &lrp->res.stateid : NULL);
  8774. nfs4_sequence_free_slot(&lrp->res.seq_res);
  8775. if (lrp->ld_private.ops && lrp->ld_private.ops->free)
  8776. lrp->ld_private.ops->free(&lrp->ld_private);
  8777. pnfs_put_layout_hdr(lrp->args.layout);
  8778. nfs_iput_and_deactive(lrp->inode);
  8779. put_cred(lrp->cred);
  8780. kfree(calldata);
  8781. }
  8782. static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
  8783. .rpc_call_prepare = nfs4_layoutreturn_prepare,
  8784. .rpc_call_done = nfs4_layoutreturn_done,
  8785. .rpc_release = nfs4_layoutreturn_release,
  8786. };
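/*
Descriptive note (added): send LAYOUTRETURN, either synchronously or as a
fire-and-forget async task. The async case requires an active inode
reference; without one the request is dropped and -EAGAIN is returned.
*/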
  8787. int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
  8788. {
  8789. struct rpc_task *task;
  8790. struct rpc_message msg = {
  8791. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
  8792. .rpc_argp = &lrp->args,
  8793. .rpc_resp = &lrp->res,
  8794. .rpc_cred = lrp->cred,
  8795. };
  8796. struct rpc_task_setup task_setup_data = {
  8797. .rpc_client = NFS_SERVER(lrp->args.inode)->client,
  8798. .rpc_message = &msg,
  8799. .callback_ops = &nfs4_layoutreturn_call_ops,
  8800. .callback_data = lrp,
  8801. .flags = RPC_TASK_MOVEABLE,
  8802. };
  8803. int status = 0;
  8804. nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
  8805. NFS_SP4_MACH_CRED_PNFS_CLEANUP,
  8806. &task_setup_data.rpc_client, &msg);
  8807. lrp->inode = nfs_igrab_and_active(lrp->args.inode);
  8808. if (!sync) {
  8809. if (!lrp->inode) {
  8810. nfs4_layoutreturn_release(lrp);
  8811. return -EAGAIN;
  8812. }
  8813. task_setup_data.flags |= RPC_TASK_ASYNC;
  8814. }
  8815. if (!lrp->inode)
  8816. nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
  8817. 1);
  8818. else
  8819. nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1,
  8820. 0);
  8821. task = rpc_run_task(&task_setup_data);
  8822. if (IS_ERR(task))
  8823. return PTR_ERR(task);
  8824. if (sync)
  8825. status = task->tk_status;
  8826. trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
  8827. dprintk("<-- %s status=%d\n", __func__, status);
  8828. rpc_put_task(task);
  8829. return status;
  8830. }
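/*
Descriptive note (added): fetch a pNFS device description with GETDEVICEINFO,
asking the server to notify us of device ID changes and deletions; if the
server will not send exactly those notifications, mark the device as
uncacheable.
*/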
  8831. static int
  8832. _nfs4_proc_getdeviceinfo(struct nfs_server *server,
  8833. struct pnfs_device *pdev,
  8834. const struct cred *cred)
  8835. {
  8836. struct nfs4_getdeviceinfo_args args = {
  8837. .pdev = pdev,
  8838. .notify_types = NOTIFY_DEVICEID4_CHANGE |
  8839. NOTIFY_DEVICEID4_DELETE,
  8840. };
  8841. struct nfs4_getdeviceinfo_res res = {
  8842. .pdev = pdev,
  8843. };
  8844. struct rpc_message msg = {
  8845. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
  8846. .rpc_argp = &args,
  8847. .rpc_resp = &res,
  8848. .rpc_cred = cred,
  8849. };
  8850. int status;
  8851. status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
  8852. if (res.notification & ~args.notify_types)
  8853. dprintk("%s: unsupported notification\n", __func__);
  8854. if (res.notification != args.notify_types)
  8855. pdev->nocache = 1;
  8856. trace_nfs4_getdeviceinfo(server, &pdev->dev_id, status);
  8857. dprintk("<-- %s status=%d\n", __func__, status);
  8858. return status;
  8859. }
  8860. int nfs4_proc_getdeviceinfo(struct nfs_server *server,
  8861. struct pnfs_device *pdev,
  8862. const struct cred *cred)
  8863. {
  8864. struct nfs4_exception exception = { };
  8865. int err;
  8866. do {
  8867. err = nfs4_handle_exception(server,
  8868. _nfs4_proc_getdeviceinfo(server, pdev, cred),
  8869. &exception);
  8870. } while (exception.retry);
  8871. return err;
  8872. }
  8873. EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
  8874. static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
  8875. {
  8876. struct nfs4_layoutcommit_data *data = calldata;
  8877. struct nfs_server *server = NFS_SERVER(data->args.inode);
  8878. nfs4_setup_sequence(server->nfs_client,
  8879. &data->args.seq_args,
  8880. &data->res.seq_res,
  8881. task);
  8882. }
  8883. static void
  8884. nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
  8885. {
  8886. struct nfs4_layoutcommit_data *data = calldata;
  8887. struct nfs_server *server = NFS_SERVER(data->args.inode);
  8888. if (!nfs41_sequence_done(task, &data->res.seq_res))
  8889. return;
  8890. switch (task->tk_status) { /* Just ignore these failures */
  8891. case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
  8892. case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
  8893. case -NFS4ERR_BADLAYOUT: /* no layout */
8894. case -NFS4ERR_GRACE: /* loca_reclaim always false */
  8895. task->tk_status = 0;
  8896. break;
  8897. case 0:
  8898. break;
  8899. default:
  8900. if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
  8901. rpc_restart_call_prepare(task);
  8902. return;
  8903. }
  8904. }
  8905. }
  8906. static void nfs4_layoutcommit_release(void *calldata)
  8907. {
  8908. struct nfs4_layoutcommit_data *data = calldata;
  8909. pnfs_cleanup_layoutcommit(data);
  8910. nfs_post_op_update_inode_force_wcc(data->args.inode,
  8911. data->res.fattr);
  8912. put_cred(data->cred);
  8913. nfs_iput_and_deactive(data->inode);
  8914. kfree(data);
  8915. }
  8916. static const struct rpc_call_ops nfs4_layoutcommit_ops = {
  8917. .rpc_call_prepare = nfs4_layoutcommit_prepare,
  8918. .rpc_call_done = nfs4_layoutcommit_done,
  8919. .rpc_release = nfs4_layoutcommit_release,
  8920. };
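/*
Descriptive note (added): send LAYOUTCOMMIT so the MDS picks up the client's
I/O results (notably args.lastbytewritten), either synchronously or as an
async task; the async case needs an active inode reference.
*/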
  8921. int
  8922. nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
  8923. {
  8924. struct rpc_message msg = {
  8925. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
  8926. .rpc_argp = &data->args,
  8927. .rpc_resp = &data->res,
  8928. .rpc_cred = data->cred,
  8929. };
  8930. struct rpc_task_setup task_setup_data = {
  8931. .task = &data->task,
  8932. .rpc_client = NFS_CLIENT(data->args.inode),
  8933. .rpc_message = &msg,
  8934. .callback_ops = &nfs4_layoutcommit_ops,
  8935. .callback_data = data,
  8936. .flags = RPC_TASK_MOVEABLE,
  8937. };
  8938. struct rpc_task *task;
  8939. int status = 0;
  8940. dprintk("NFS: initiating layoutcommit call. sync %d "
  8941. "lbw: %llu inode %lu\n", sync,
  8942. data->args.lastbytewritten,
  8943. data->args.inode->i_ino);
  8944. if (!sync) {
  8945. data->inode = nfs_igrab_and_active(data->args.inode);
  8946. if (data->inode == NULL) {
  8947. nfs4_layoutcommit_release(data);
  8948. return -EAGAIN;
  8949. }
  8950. task_setup_data.flags = RPC_TASK_ASYNC;
  8951. }
  8952. nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
  8953. task = rpc_run_task(&task_setup_data);
  8954. if (IS_ERR(task))
  8955. return PTR_ERR(task);
  8956. if (sync)
  8957. status = task->tk_status;
  8958. trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
  8959. dprintk("%s: status %d\n", __func__, status);
  8960. rpc_put_task(task);
  8961. return status;
  8962. }
  8963. /*
8964. Use the state management nfs_client cl_rpcclient, which uses krb5i (if
  8965. * possible) as per RFC3530bis and RFC5661 Security Considerations sections
  8966. */
  8967. static int
  8968. _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
  8969. struct nfs_fsinfo *info,
  8970. struct nfs4_secinfo_flavors *flavors, bool use_integrity)
  8971. {
  8972. struct nfs41_secinfo_no_name_args args = {
  8973. .style = SECINFO_STYLE_CURRENT_FH,
  8974. };
  8975. struct nfs4_secinfo_res res = {
  8976. .flavors = flavors,
  8977. };
  8978. struct rpc_message msg = {
  8979. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
  8980. .rpc_argp = &args,
  8981. .rpc_resp = &res,
  8982. };
  8983. struct nfs4_call_sync_data data = {
  8984. .seq_server = server,
  8985. .seq_args = &args.seq_args,
  8986. .seq_res = &res.seq_res,
  8987. };
  8988. struct rpc_task_setup task_setup = {
  8989. .rpc_client = server->client,
  8990. .rpc_message = &msg,
  8991. .callback_ops = server->nfs_client->cl_mvops->call_sync_ops,
  8992. .callback_data = &data,
  8993. .flags = RPC_TASK_NO_ROUND_ROBIN,
  8994. };
  8995. const struct cred *cred = NULL;
  8996. int status;
  8997. if (use_integrity) {
  8998. task_setup.rpc_client = server->nfs_client->cl_rpcclient;
  8999. cred = nfs4_get_clid_cred(server->nfs_client);
  9000. msg.rpc_cred = cred;
  9001. }
  9002. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0);
  9003. status = nfs4_call_sync_custom(&task_setup);
  9004. dprintk("<-- %s status=%d\n", __func__, status);
  9005. put_cred(cred);
  9006. return status;
  9007. }
  9008. static int
  9009. nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
  9010. struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
  9011. {
  9012. struct nfs4_exception exception = {
  9013. .interruptible = true,
  9014. };
  9015. int err;
  9016. do {
  9017. /* first try using integrity protection */
  9018. err = -NFS4ERR_WRONGSEC;
  9019. /* try to use integrity protection with machine cred */
  9020. if (_nfs4_is_integrity_protected(server->nfs_client))
  9021. err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
  9022. flavors, true);
  9023. /*
9024. If unable to use integrity protection, or if SECINFO with
9025. integrity protection returns NFS4ERR_WRONGSEC (which is
9026. disallowed by the spec but exists in deployed servers), use
9027. the current filesystem's rpc_client and the user cred.
  9028. */
  9029. if (err == -NFS4ERR_WRONGSEC)
  9030. err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
  9031. flavors, false);
  9032. switch (err) {
  9033. case 0:
  9034. case -NFS4ERR_WRONGSEC:
  9035. case -ENOTSUPP:
  9036. goto out;
  9037. default:
  9038. err = nfs4_handle_exception(server, err, &exception);
  9039. }
  9040. } while (exception.retry);
  9041. out:
  9042. return err;
  9043. }
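/*
Descriptive note (added): pick a security flavor for the root file handle
using SECINFO_NO_NAME, trying each flavor the server offers that also matches
the mount's auth_info; fall back to nfs4_find_root_sec() when the server does
not support SECINFO_NO_NAME.
*/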
  9044. static int
  9045. nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
  9046. struct nfs_fsinfo *info)
  9047. {
  9048. int err;
  9049. struct page *page;
  9050. rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
  9051. struct nfs4_secinfo_flavors *flavors;
  9052. struct nfs4_secinfo4 *secinfo;
  9053. int i;
  9054. page = alloc_page(GFP_KERNEL);
  9055. if (!page) {
  9056. err = -ENOMEM;
  9057. goto out;
  9058. }
  9059. flavors = page_address(page);
  9060. err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
  9061. /*
  9062. * Fall back on "guess and check" method if
  9063. * the server doesn't support SECINFO_NO_NAME
  9064. */
  9065. if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
  9066. err = nfs4_find_root_sec(server, fhandle, info);
  9067. goto out_freepage;
  9068. }
  9069. if (err)
  9070. goto out_freepage;
  9071. for (i = 0; i < flavors->num_flavors; i++) {
  9072. secinfo = &flavors->flavors[i];
  9073. switch (secinfo->flavor) {
  9074. case RPC_AUTH_NULL:
  9075. case RPC_AUTH_UNIX:
  9076. case RPC_AUTH_GSS:
  9077. flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
  9078. &secinfo->flavor_info);
  9079. break;
  9080. default:
  9081. flavor = RPC_AUTH_MAXFLAVOR;
  9082. break;
  9083. }
  9084. if (!nfs_auth_info_match(&server->auth_info, flavor))
  9085. flavor = RPC_AUTH_MAXFLAVOR;
  9086. if (flavor != RPC_AUTH_MAXFLAVOR) {
  9087. err = nfs4_lookup_root_sec(server, fhandle,
  9088. info, flavor);
  9089. if (!err)
  9090. break;
  9091. }
  9092. }
  9093. if (flavor == RPC_AUTH_MAXFLAVOR)
  9094. err = -EPERM;
  9095. out_freepage:
  9096. put_page(page);
  9097. if (err == -EACCES)
  9098. return -EPERM;
  9099. out:
  9100. return err;
  9101. }
  9102. static int _nfs41_test_stateid(struct nfs_server *server,
  9103. nfs4_stateid *stateid,
  9104. const struct cred *cred)
  9105. {
  9106. int status;
  9107. struct nfs41_test_stateid_args args = {
  9108. .stateid = stateid,
  9109. };
  9110. struct nfs41_test_stateid_res res;
  9111. struct rpc_message msg = {
  9112. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
  9113. .rpc_argp = &args,
  9114. .rpc_resp = &res,
  9115. .rpc_cred = cred,
  9116. };
  9117. struct rpc_clnt *rpc_client = server->client;
  9118. nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
  9119. &rpc_client, &msg);
  9120. dprintk("NFS call test_stateid %p\n", stateid);
  9121. nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 1);
  9122. status = nfs4_call_sync_sequence(rpc_client, server, &msg,
  9123. &args.seq_args, &res.seq_res);
  9124. if (status != NFS_OK) {
  9125. dprintk("NFS reply test_stateid: failed, %d\n", status);
  9126. return status;
  9127. }
  9128. dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
  9129. return -res.status;
  9130. }
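/*
Descriptive note (added): helper for TEST_STATEID: only NFS4ERR_DELAY,
NFS4ERR_RETRY_UNCACHED_REP and session-related errors go through the usual
exception handling (and may set exception->retry); every other error is
returned to the caller as-is.
*/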
  9131. static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
  9132. int err, struct nfs4_exception *exception)
  9133. {
  9134. exception->retry = 0;
  9135. switch(err) {
  9136. case -NFS4ERR_DELAY:
  9137. case -NFS4ERR_RETRY_UNCACHED_REP:
  9138. nfs4_handle_exception(server, err, exception);
  9139. break;
  9140. case -NFS4ERR_BADSESSION:
  9141. case -NFS4ERR_BADSLOT:
  9142. case -NFS4ERR_BAD_HIGH_SLOT:
  9143. case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
  9144. case -NFS4ERR_DEADSESSION:
  9145. nfs4_do_handle_exception(server, err, exception);
  9146. }
  9147. }
  9148. /**
  9149. * nfs41_test_stateid - perform a TEST_STATEID operation
  9150. *
  9151. * @server: server / transport on which to perform the operation
  9152. * @stateid: state ID to test
  9153. * @cred: credential
  9154. *
  9155. * Returns NFS_OK if the server recognizes that "stateid" is valid.
  9156. * Otherwise a negative NFS4ERR value is returned if the operation
  9157. * failed or the state ID is not currently valid.
  9158. */
  9159. static int nfs41_test_stateid(struct nfs_server *server,
  9160. nfs4_stateid *stateid,
  9161. const struct cred *cred)
  9162. {
  9163. struct nfs4_exception exception = {
  9164. .interruptible = true,
  9165. };
  9166. int err;
  9167. do {
  9168. err = _nfs41_test_stateid(server, stateid, cred);
  9169. nfs4_handle_delay_or_session_error(server, err, &exception);
  9170. } while (exception.retry);
  9171. return err;
  9172. }
  9173. struct nfs_free_stateid_data {
  9174. struct nfs_server *server;
  9175. struct nfs41_free_stateid_args args;
  9176. struct nfs41_free_stateid_res res;
  9177. };
  9178. static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
  9179. {
  9180. struct nfs_free_stateid_data *data = calldata;
  9181. nfs4_setup_sequence(data->server->nfs_client,
  9182. &data->args.seq_args,
  9183. &data->res.seq_res,
  9184. task);
  9185. }
  9186. static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
  9187. {
  9188. struct nfs_free_stateid_data *data = calldata;
  9189. nfs41_sequence_done(task, &data->res.seq_res);
  9190. switch (task->tk_status) {
  9191. case -NFS4ERR_DELAY:
  9192. if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
  9193. rpc_restart_call_prepare(task);
  9194. }
  9195. }
  9196. static void nfs41_free_stateid_release(void *calldata)
  9197. {
  9198. struct nfs_free_stateid_data *data = calldata;
  9199. struct nfs_client *clp = data->server->nfs_client;
  9200. nfs_put_client(clp);
  9201. kfree(calldata);
  9202. }
  9203. static const struct rpc_call_ops nfs41_free_stateid_ops = {
  9204. .rpc_call_prepare = nfs41_free_stateid_prepare,
  9205. .rpc_call_done = nfs41_free_stateid_done,
  9206. .rpc_release = nfs41_free_stateid_release,
  9207. };
  9208. /**
  9209. * nfs41_free_stateid - perform a FREE_STATEID operation
  9210. *
  9211. * @server: server / transport on which to perform the operation
  9212. * @stateid: state ID to release
  9213. * @cred: credential
  9214. * @privileged: set to true if this call needs to be privileged
  9215. *
  9216. * Note: this function is always asynchronous.
  9217. */
  9218. static int nfs41_free_stateid(struct nfs_server *server,
  9219. const nfs4_stateid *stateid,
  9220. const struct cred *cred,
  9221. bool privileged)
  9222. {
  9223. struct rpc_message msg = {
  9224. .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
  9225. .rpc_cred = cred,
  9226. };
  9227. struct rpc_task_setup task_setup = {
  9228. .rpc_client = server->client,
  9229. .rpc_message = &msg,
  9230. .callback_ops = &nfs41_free_stateid_ops,
  9231. .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
  9232. };
  9233. struct nfs_free_stateid_data *data;
  9234. struct rpc_task *task;
  9235. struct nfs_client *clp = server->nfs_client;
  9236. if (!refcount_inc_not_zero(&clp->cl_count))
  9237. return -EIO;
  9238. nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
  9239. &task_setup.rpc_client, &msg);
  9240. dprintk("NFS call free_stateid %p\n", stateid);
  9241. data = kmalloc(sizeof(*data), GFP_KERNEL);
  9242. if (!data)
  9243. return -ENOMEM;
  9244. data->server = server;
  9245. nfs4_stateid_copy(&data->args.stateid, stateid);
  9246. task_setup.callback_data = data;
  9247. msg.rpc_argp = &data->args;
  9248. msg.rpc_resp = &data->res;
  9249. nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, privileged);
  9250. task = rpc_run_task(&task_setup);
  9251. if (IS_ERR(task))
  9252. return PTR_ERR(task);
  9253. rpc_put_task(task);
  9254. return 0;
  9255. }
  9256. static void
  9257. nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
  9258. {
  9259. const struct cred *cred = lsp->ls_state->owner->so_cred;
  9260. nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
  9261. nfs4_free_lock_state(server, lsp);
  9262. }
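/*
Descriptive note (added): NFSv4.1 stateid comparison: the type and "other"
fields must match, and the seqids must either match or one of them must be
zero (the "current seqid" wildcard).
*/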
  9263. static bool nfs41_match_stateid(const nfs4_stateid *s1,
  9264. const nfs4_stateid *s2)
  9265. {
  9266. if (s1->type != s2->type)
  9267. return false;
  9268. if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
  9269. return false;
  9270. if (s1->seqid == s2->seqid)
  9271. return true;
  9272. return s1->seqid == 0 || s2->seqid == 0;
  9273. }
  9274. #endif /* CONFIG_NFS_V4_1 */
  9275. static bool nfs4_match_stateid(const nfs4_stateid *s1,
  9276. const nfs4_stateid *s2)
  9277. {
  9278. return nfs4_stateid_match(s1, s2);
  9279. }
  9280. static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
  9281. .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
  9282. .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
  9283. .recover_open = nfs4_open_reclaim,
  9284. .recover_lock = nfs4_lock_reclaim,
  9285. .establish_clid = nfs4_init_clientid,
  9286. .detect_trunking = nfs40_discover_server_trunking,
  9287. };
  9288. #if defined(CONFIG_NFS_V4_1)
  9289. static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
  9290. .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
  9291. .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
  9292. .recover_open = nfs4_open_reclaim,
  9293. .recover_lock = nfs4_lock_reclaim,
  9294. .establish_clid = nfs41_init_clientid,
  9295. .reclaim_complete = nfs41_proc_reclaim_complete,
  9296. .detect_trunking = nfs41_discover_server_trunking,
  9297. };
  9298. #endif /* CONFIG_NFS_V4_1 */
  9299. static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
  9300. .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
  9301. .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
  9302. .recover_open = nfs40_open_expired,
  9303. .recover_lock = nfs4_lock_expired,
  9304. .establish_clid = nfs4_init_clientid,
  9305. };
  9306. #if defined(CONFIG_NFS_V4_1)
  9307. static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
  9308. .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
  9309. .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
  9310. .recover_open = nfs41_open_expired,
  9311. .recover_lock = nfs41_lock_expired,
  9312. .establish_clid = nfs41_init_clientid,
  9313. };
  9314. #endif /* CONFIG_NFS_V4_1 */
  9315. static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
  9316. .sched_state_renewal = nfs4_proc_async_renew,
  9317. .get_state_renewal_cred = nfs4_get_renew_cred,
  9318. .renew_lease = nfs4_proc_renew,
  9319. };
  9320. #if defined(CONFIG_NFS_V4_1)
  9321. static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
  9322. .sched_state_renewal = nfs41_proc_async_sequence,
  9323. .get_state_renewal_cred = nfs4_get_machine_cred,
  9324. .renew_lease = nfs4_proc_sequence,
  9325. };
  9326. #endif
  9327. static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
  9328. .get_locations = _nfs40_proc_get_locations,
  9329. .fsid_present = _nfs40_proc_fsid_present,
  9330. };
  9331. #if defined(CONFIG_NFS_V4_1)
  9332. static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
  9333. .get_locations = _nfs41_proc_get_locations,
  9334. .fsid_present = _nfs41_proc_fsid_present,
  9335. };
  9336. #endif /* CONFIG_NFS_V4_1 */
  9337. static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
  9338. .minor_version = 0,
  9339. .init_caps = NFS_CAP_READDIRPLUS
  9340. | NFS_CAP_ATOMIC_OPEN
  9341. | NFS_CAP_POSIX_LOCK,
  9342. .init_client = nfs40_init_client,
  9343. .shutdown_client = nfs40_shutdown_client,
  9344. .match_stateid = nfs4_match_stateid,
  9345. .find_root_sec = nfs4_find_root_sec,
  9346. .free_lock_state = nfs4_release_lockowner,
  9347. .test_and_free_expired = nfs40_test_and_free_expired_stateid,
  9348. .alloc_seqid = nfs_alloc_seqid,
  9349. .call_sync_ops = &nfs40_call_sync_ops,
  9350. .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
  9351. .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
  9352. .state_renewal_ops = &nfs40_state_renewal_ops,
  9353. .mig_recovery_ops = &nfs40_mig_recovery_ops,
  9354. };
  9355. #if defined(CONFIG_NFS_V4_1)
  9356. static struct nfs_seqid *
  9357. nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
  9358. {
  9359. return NULL;
  9360. }
  9361. static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
  9362. .minor_version = 1,
  9363. .init_caps = NFS_CAP_READDIRPLUS
  9364. | NFS_CAP_ATOMIC_OPEN
  9365. | NFS_CAP_POSIX_LOCK
  9366. | NFS_CAP_STATEID_NFSV41
  9367. | NFS_CAP_ATOMIC_OPEN_V1
  9368. | NFS_CAP_LGOPEN
  9369. | NFS_CAP_MOVEABLE,
  9370. .init_client = nfs41_init_client,
  9371. .shutdown_client = nfs41_shutdown_client,
  9372. .match_stateid = nfs41_match_stateid,
  9373. .find_root_sec = nfs41_find_root_sec,
  9374. .free_lock_state = nfs41_free_lock_state,
  9375. .test_and_free_expired = nfs41_test_and_free_expired_stateid,
  9376. .alloc_seqid = nfs_alloc_no_seqid,
  9377. .session_trunk = nfs4_test_session_trunk,
  9378. .call_sync_ops = &nfs41_call_sync_ops,
  9379. .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
  9380. .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
  9381. .state_renewal_ops = &nfs41_state_renewal_ops,
  9382. .mig_recovery_ops = &nfs41_mig_recovery_ops,
  9383. };
  9384. #endif
  9385. #if defined(CONFIG_NFS_V4_2)
  9386. static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
  9387. .minor_version = 2,
  9388. .init_caps = NFS_CAP_READDIRPLUS
  9389. | NFS_CAP_ATOMIC_OPEN
  9390. | NFS_CAP_POSIX_LOCK
  9391. | NFS_CAP_STATEID_NFSV41
  9392. | NFS_CAP_ATOMIC_OPEN_V1
  9393. | NFS_CAP_LGOPEN
  9394. | NFS_CAP_ALLOCATE
  9395. | NFS_CAP_COPY
  9396. | NFS_CAP_OFFLOAD_CANCEL
  9397. | NFS_CAP_COPY_NOTIFY
  9398. | NFS_CAP_DEALLOCATE
  9399. | NFS_CAP_SEEK
  9400. | NFS_CAP_LAYOUTSTATS
  9401. | NFS_CAP_CLONE
  9402. | NFS_CAP_LAYOUTERROR
  9403. | NFS_CAP_READ_PLUS
  9404. | NFS_CAP_MOVEABLE,
  9405. .init_client = nfs41_init_client,
  9406. .shutdown_client = nfs41_shutdown_client,
  9407. .match_stateid = nfs41_match_stateid,
  9408. .find_root_sec = nfs41_find_root_sec,
  9409. .free_lock_state = nfs41_free_lock_state,
  9410. .call_sync_ops = &nfs41_call_sync_ops,
  9411. .test_and_free_expired = nfs41_test_and_free_expired_stateid,
  9412. .alloc_seqid = nfs_alloc_no_seqid,
  9413. .session_trunk = nfs4_test_session_trunk,
  9414. .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
  9415. .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
  9416. .state_renewal_ops = &nfs41_state_renewal_ops,
  9417. .mig_recovery_ops = &nfs41_mig_recovery_ops,
  9418. };
  9419. #endif
  9420. const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
  9421. [0] = &nfs_v4_0_minor_ops,
  9422. #if defined(CONFIG_NFS_V4_1)
  9423. [1] = &nfs_v4_1_minor_ops,
  9424. #endif
  9425. #if defined(CONFIG_NFS_V4_2)
  9426. [2] = &nfs_v4_2_minor_ops,
  9427. #endif
  9428. };
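/*
Descriptive note (added): concatenate the xattr names reported by the generic
handlers, the NFSv4 security label code and (on v4.2) the user.* namespace
into the caller's buffer, returning the combined length.
*/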
  9429. static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
  9430. {
  9431. ssize_t error, error2, error3;
  9432. error = generic_listxattr(dentry, list, size);
  9433. if (error < 0)
  9434. return error;
  9435. if (list) {
  9436. list += error;
  9437. size -= error;
  9438. }
  9439. error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
  9440. if (error2 < 0)
  9441. return error2;
  9442. if (list) {
  9443. list += error2;
  9444. size -= error2;
  9445. }
  9446. error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
  9447. if (error3 < 0)
  9448. return error3;
  9449. return error + error2 + error3;
  9450. }
  9451. static void nfs4_enable_swap(struct inode *inode)
  9452. {
  9453. /* The state manager thread must always be running.
  9454. * It will notice the client is a swapper, and stay put.
  9455. */
  9456. struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
  9457. nfs4_schedule_state_manager(clp);
  9458. }
  9459. static void nfs4_disable_swap(struct inode *inode)
  9460. {
  9461. /* The state manager thread will now exit once it is
  9462. * woken.
  9463. */
  9464. struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
  9465. set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
  9466. clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
  9467. wake_up_var(&clp->cl_state);
  9468. }
  9469. static const struct inode_operations nfs4_dir_inode_operations = {
  9470. .create = nfs_create,
  9471. .lookup = nfs_lookup,
  9472. .atomic_open = nfs_atomic_open,
  9473. .link = nfs_link,
  9474. .unlink = nfs_unlink,
  9475. .symlink = nfs_symlink,
  9476. .mkdir = nfs_mkdir,
  9477. .rmdir = nfs_rmdir,
  9478. .mknod = nfs_mknod,
  9479. .rename = nfs_rename,
  9480. .permission = nfs_permission,
  9481. .getattr = nfs_getattr,
  9482. .setattr = nfs_setattr,
  9483. .listxattr = nfs4_listxattr,
  9484. };
  9485. static const struct inode_operations nfs4_file_inode_operations = {
  9486. .permission = nfs_permission,
  9487. .getattr = nfs_getattr,
  9488. .setattr = nfs_setattr,
  9489. .listxattr = nfs4_listxattr,
  9490. };
  9491. const struct nfs_rpc_ops nfs_v4_clientops = {
  9492. .version = 4, /* protocol version */
  9493. .dentry_ops = &nfs4_dentry_operations,
  9494. .dir_inode_ops = &nfs4_dir_inode_operations,
  9495. .file_inode_ops = &nfs4_file_inode_operations,
  9496. .file_ops = &nfs4_file_operations,
  9497. .getroot = nfs4_proc_get_root,
  9498. .submount = nfs4_submount,
  9499. .try_get_tree = nfs4_try_get_tree,
  9500. .getattr = nfs4_proc_getattr,
  9501. .setattr = nfs4_proc_setattr,
  9502. .lookup = nfs4_proc_lookup,
  9503. .lookupp = nfs4_proc_lookupp,
  9504. .access = nfs4_proc_access,
  9505. .readlink = nfs4_proc_readlink,
  9506. .create = nfs4_proc_create,
  9507. .remove = nfs4_proc_remove,
  9508. .unlink_setup = nfs4_proc_unlink_setup,
  9509. .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
  9510. .unlink_done = nfs4_proc_unlink_done,
  9511. .rename_setup = nfs4_proc_rename_setup,
  9512. .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
  9513. .rename_done = nfs4_proc_rename_done,
  9514. .link = nfs4_proc_link,
  9515. .symlink = nfs4_proc_symlink,
  9516. .mkdir = nfs4_proc_mkdir,
  9517. .rmdir = nfs4_proc_rmdir,
  9518. .readdir = nfs4_proc_readdir,
  9519. .mknod = nfs4_proc_mknod,
  9520. .statfs = nfs4_proc_statfs,
  9521. .fsinfo = nfs4_proc_fsinfo,
  9522. .pathconf = nfs4_proc_pathconf,
  9523. .set_capabilities = nfs4_server_capabilities,
  9524. .decode_dirent = nfs4_decode_dirent,
  9525. .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
  9526. .read_setup = nfs4_proc_read_setup,
  9527. .read_done = nfs4_read_done,
  9528. .write_setup = nfs4_proc_write_setup,
  9529. .write_done = nfs4_write_done,
  9530. .commit_setup = nfs4_proc_commit_setup,
  9531. .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
  9532. .commit_done = nfs4_commit_done,
  9533. .lock = nfs4_proc_lock,
  9534. .clear_acl_cache = nfs4_zap_acl_attr,
  9535. .close_context = nfs4_close_context,
  9536. .open_context = nfs4_atomic_open,
  9537. .have_delegation = nfs4_have_delegation,
  9538. .alloc_client = nfs4_alloc_client,
  9539. .init_client = nfs4_init_client,
  9540. .free_client = nfs4_free_client,
  9541. .create_server = nfs4_create_server,
  9542. .clone_server = nfs_clone_server,
  9543. .discover_trunking = nfs4_discover_trunking,
  9544. .enable_swap = nfs4_enable_swap,
  9545. .disable_swap = nfs4_disable_swap,
  9546. };
  9547. static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
  9548. .name = XATTR_NAME_NFSV4_ACL,
  9549. .list = nfs4_xattr_list_nfs4_acl,
  9550. .get = nfs4_xattr_get_nfs4_acl,
  9551. .set = nfs4_xattr_set_nfs4_acl,
  9552. };
  9553. #if defined(CONFIG_NFS_V4_1)
  9554. static const struct xattr_handler nfs4_xattr_nfs4_dacl_handler = {
  9555. .name = XATTR_NAME_NFSV4_DACL,
  9556. .list = nfs4_xattr_list_nfs4_dacl,
  9557. .get = nfs4_xattr_get_nfs4_dacl,
  9558. .set = nfs4_xattr_set_nfs4_dacl,
  9559. };
  9560. static const struct xattr_handler nfs4_xattr_nfs4_sacl_handler = {
  9561. .name = XATTR_NAME_NFSV4_SACL,
  9562. .list = nfs4_xattr_list_nfs4_sacl,
  9563. .get = nfs4_xattr_get_nfs4_sacl,
  9564. .set = nfs4_xattr_set_nfs4_sacl,
  9565. };
  9566. #endif
  9567. #ifdef CONFIG_NFS_V4_2
  9568. static const struct xattr_handler nfs4_xattr_nfs4_user_handler = {
  9569. .prefix = XATTR_USER_PREFIX,
  9570. .get = nfs4_xattr_get_nfs4_user,
  9571. .set = nfs4_xattr_set_nfs4_user,
  9572. };
  9573. #endif
  9574. const struct xattr_handler *nfs4_xattr_handlers[] = {
  9575. &nfs4_xattr_nfs4_acl_handler,
  9576. #if defined(CONFIG_NFS_V4_1)
  9577. &nfs4_xattr_nfs4_dacl_handler,
  9578. &nfs4_xattr_nfs4_sacl_handler,
  9579. #endif
  9580. #ifdef CONFIG_NFS_V4_SECURITY_LABEL
  9581. &nfs4_xattr_nfs4_label_handler,
  9582. #endif
  9583. #ifdef CONFIG_NFS_V4_2
  9584. &nfs4_xattr_nfs4_user_handler,
  9585. #endif
  9586. NULL
  9587. };