// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "iostat.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]			= "kmalloc",
	[FAULT_KVMALLOC]		= "kvmalloc",
	[FAULT_PAGE_ALLOC]		= "page alloc",
	[FAULT_PAGE_GET]		= "page get",
	[FAULT_ALLOC_NID]		= "alloc nid",
	[FAULT_ORPHAN]			= "orphan",
	[FAULT_BLOCK]			= "no more block",
	[FAULT_DIR_DEPTH]		= "too big dir depth",
	[FAULT_EVICT_INODE]		= "evict_inode fail",
	[FAULT_TRUNCATE]		= "truncate fail",
	[FAULT_READ_IO]			= "read IO error",
	[FAULT_CHECKPOINT]		= "checkpoint error",
	[FAULT_DISCARD]			= "discard error",
	[FAULT_WRITE_IO]		= "write IO error",
	[FAULT_SLAB_ALLOC]		= "slab alloc",
	[FAULT_DQUOT_INIT]		= "dquot initialize",
	[FAULT_LOCK_OP]			= "lock_op",
	[FAULT_BLKADDR_VALIDITY]	= "invalid blkaddr",
	[FAULT_BLKADDR_CONSISTENCE]	= "inconsistent blkaddr",
};

void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
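
/*
 * Note (illustrative): inject_rate is roughly "one fault per N eligible
 * operations" -- inject_ops counts checked call sites and a fault fires
 * when the count reaches inject_rate. Mounting with, e.g.,
 * fault_injection=1000 thus asks for about one injected failure per 1000
 * checked operations across all fault types; fault_type=<mask> narrows
 * injection to the selected types.
 */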
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_barrier,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_nocompress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_nogc_merge,
	Opt_discard_unit,
	Opt_memory_mode,
	Opt_age_extent_cache,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_nocompress_extension, "nocompress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_atgc, "atgc"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_discard_unit, "discard_unit=%s"},
	{Opt_memory_mode, "memory=%s"},
	{Opt_age_extent_cache, "age_extent_cache"},
	{Opt_err, NULL},
};
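
/*
 * Illustrative example: a mount invocation such as
 *	mount -t f2fs -o background_gc=on,discard,compress_algorithm=lz4 <dev> <dir>
 * hands the string after "-o" to parse_options() below, which splits it on
 * ',' and matches each piece against the table above via match_token().
 */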
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}

#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	unsigned int version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};

static const struct f2fs_sb_encodings *
f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			return &f2fs_sb_encoding_map[i];

	return NULL;
}

struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
						   F2FS_NAME_LEN);
	return f2fs_cf_name_slab ? 0 : -ENOMEM;
}

static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif

static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count >> 3),
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 12.5% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}
static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in section in worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
		 wanted_reserved_segments);

	return 0;
}

static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}
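
/*
 * Worked example (illustrative): with "checkpoint=disable:30%" on an image
 * whose user_block_count is 1,000,000 blocks, the computation above yields
 * unusable_cap = (1,000,000 / 100) * 30 = 300,000 blocks.
 */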
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}

	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
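
/*
 * Illustrative rejection: mounting with "grpquota,usrjquota=aquota.user"
 * leaves an old-style enforcement flag (GRPQUOTA) set for a type that has
 * no journaled quota file, so the check above fails with
 * "old and new quota format mixing".
 */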
#endif

static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct fs_parameter param = {
		.type = fs_value_is_string,
		.string = arg->from ? arg->from : "",
	};
	struct fscrypt_dummy_policy *policy =
		&F2FS_OPTION(sbi).dummy_enc_policy;
	int err;

	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
		f2fs_warn(sbi, "test_dummy_encryption option not supported");
		return -EINVAL;
	}

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount. We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}

	err = fscrypt_parse_test_dummy_encryption(&param, policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
	return 0;
}
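
/*
 * Usage note (illustrative): "test_dummy_encryption" with no value and
 * "test_dummy_encryption=v1" / "test_dummy_encryption=v2" are the forms
 * fscrypt_parse_test_dummy_encryption() understands; any other value comes
 * back as -EINVAL and is reported above as unrecognized.
 */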
#ifdef CONFIG_F2FS_FS_COMPRESSION
static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
					const char *new_ext, bool is_ext)
{
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	int ext_cnt;
	int i;

	if (is_ext) {
		ext = F2FS_OPTION(sbi).extensions;
		ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
	} else {
		ext = F2FS_OPTION(sbi).noextensions;
		ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
	}

	for (i = 0; i < ext_cnt; i++) {
		if (!strcasecmp(new_ext, ext[i]))
			return true;
	}

	return false;
}

/*
 * 1. The same extension name cannot appear in both the compress and
 * non-compress extension lists at the same time.
 * 2. If the compress extension specifies all files, the types specified by
 * the non-compress extension will be treated as special cases and will not
 * be compressed.
 * 3. Don't allow the non-compress extension to specify all files.
 */
static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
{
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt, index = 0, no_index = 0;

	ext = F2FS_OPTION(sbi).extensions;
	ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
	noext = F2FS_OPTION(sbi).noextensions;
	noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

	if (!noext_cnt)
		return 0;

	for (no_index = 0; no_index < noext_cnt; no_index++) {
		if (!strcasecmp("*", noext[no_index])) {
			f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
			return -EINVAL;
		}
		for (index = 0; index < ext_cnt; index++) {
			if (!strcasecmp(ext[index], noext[no_index])) {
				f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
					  ext[index]);
				return -EINVAL;
			}
		}
	}
	return 0;
}
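
/*
 * Illustrative combinations: "compress_extension=*,nocompress_extension=log"
 * passes (rule 2 above: everything is compressed except *.log), while
 * "nocompress_extension=*" (rule 3) and
 * "compress_extension=mp4,nocompress_extension=mp4" (rule 1) are rejected.
 */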
#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	unsigned int level;
#endif

	if (strlen(str) == 3) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

#ifdef CONFIG_F2FS_FS_LZ4HC
	str += 3;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
#else
	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
	return -EINVAL;
#endif
}
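
/*
 * Illustrative values: "compress_algorithm=lz4" (the bare 3-character name)
 * selects plain LZ4 with compress_level = 0, while "compress_algorithm=lz4:6"
 * requests LZ4HC level 6, accepted only when CONFIG_F2FS_FS_LZ4HC is enabled
 * and the level lies within [LZ4HC_MIN_CLEVEL, LZ4HC_MAX_CLEVEL].
 */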
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
	unsigned int level;
	int len = 4;

	if (strlen(str) == len) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

	str += len;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (!level || level > zstd_max_clevel()) {
		f2fs_info(sbi, "invalid zstd compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
}
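
/*
 * Illustrative values: "compress_algorithm=zstd" keeps the default level
 * (compress_level = 0), while "compress_algorithm=zstd:3" requests level 3;
 * anything outside 1..zstd_max_clevel() is rejected above.
 */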
#endif
#endif

static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt;
#endif
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
	int ret;

	if (!options)
		goto default_check;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			if (!f2fs_hw_support_discard(sbi)) {
				f2fs_warn(sbi, "device does not support discard");
				break;
			}
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_hw_should_discard(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_barrier:
			clear_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, READ_EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, READ_EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "adaptive")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else if (!strcmp(name, "fragment:segment")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
			} else if (!strcmp(name, "fragment:block")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
				f2fs_warn(sbi, "Not support %ld, larger than %d",
					  BIT(arg), BIO_MAX_VECS);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;
		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;
		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
			break;
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_merge:
			set_opt(sbi, MERGE_CHECKPOINT);
			break;
		case Opt_nocheckpoint_merge:
			clear_opt(sbi, MERGE_CHECKPOINT);
			break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
#else
				f2fs_info(sbi, "kernel doesn't support lzo compression");
#endif
			} else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
				ret = f2fs_set_lz4hc_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
#else
				f2fs_info(sbi, "kernel doesn't support lz4 compression");
#endif
			} else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
				ret = f2fs_set_zstd_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
#else
				f2fs_info(sbi, "kernel doesn't support zstd compression");
#endif
			} else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
#else
				f2fs_info(sbi, "kernel doesn't support lzorle compression");
#endif
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
			    ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			if (is_compress_extension_exist(sbi, name, true)) {
				kfree(name);
				break;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
		case Opt_nocompress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			noext = F2FS_OPTION(sbi).noextensions;
			noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
			    noext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			if (is_compress_extension_exist(sbi, name, false)) {
				kfree(name);
				break;
			}

			strcpy(noext[noext_cnt], name);
			F2FS_OPTION(sbi).nocompress_ext_cnt++;
			kfree(name);
			break;
		case Opt_compress_chksum:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			F2FS_OPTION(sbi).compress_chksum = true;
			break;
		case Opt_compress_mode:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "fs")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
			} else if (!strcmp(name, "user")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_cache:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			set_opt(sbi, COMPRESS_CACHE);
			break;
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
		case Opt_nocompress_extension:
		case Opt_compress_chksum:
		case Opt_compress_mode:
		case Opt_compress_cache:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
		case Opt_atgc:
			set_opt(sbi, ATGC);
			break;
		case Opt_gc_merge:
			set_opt(sbi, GC_MERGE);
			break;
		case Opt_nogc_merge:
			clear_opt(sbi, GC_MERGE);
			break;
		case Opt_discard_unit:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "block")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_BLOCK;
			} else if (!strcmp(name, "segment")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SEGMENT;
			} else if (!strcmp(name, "section")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SECTION;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_memory_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "normal")) {
				F2FS_OPTION(sbi).memory_mode =
						MEMORY_MODE_NORMAL;
			} else if (!strcmp(name, "low")) {
				F2FS_OPTION(sbi).memory_mode =
						MEMORY_MODE_LOW;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_age_extent_cache:
			set_opt(sbi, AGE_EXTENT_CACHE);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
default_check:
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#if !IS_ENABLED(CONFIG_UNICODE)
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
	if (f2fs_sb_has_blkzoned(sbi)) {
#ifdef CONFIG_BLK_DEV_ZONED
		if (F2FS_OPTION(sbi).discard_unit !=
						DISCARD_UNIT_SECTION) {
			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
			F2FS_OPTION(sbi).discard_unit =
					DISCARD_UNIT_SECTION;
		}

		if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
			f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
			return -EINVAL;
		}
#else
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
#endif
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_test_compress_extension(sbi)) {
		f2fs_err(sbi, "invalid compress or nocompress extension");
		return -EINVAL;
	}
#endif
	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
  1274. return -EINVAL;
  1275. }
  1276. if (!test_opt(sbi, INLINE_XATTR)) {
  1277. f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
  1278. return -EINVAL;
  1279. }
  1280. min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
  1281. max_size = MAX_INLINE_XATTR_SIZE;
  1282. if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
  1283. F2FS_OPTION(sbi).inline_xattr_size > max_size) {
  1284. f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
  1285. min_size, max_size);
  1286. return -EINVAL;
  1287. }
  1288. }
  1289. if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
  1290. f2fs_err(sbi, "LFS is not compatible with checkpoint=disable");
  1291. return -EINVAL;
  1292. }
  1293. if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) {
  1294. f2fs_err(sbi, "LFS is not compatible with ATGC");
  1295. return -EINVAL;
  1296. }
  1297. if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) {
  1298. f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
  1299. return -EINVAL;
  1300. }
  1301. if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
  1302. f2fs_err(sbi, "Allow to mount readonly mode only");
  1303. return -EROFS;
  1304. }
  1305. return 0;
  1306. }
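
/*
 * Allocate the in-memory inode for the VFS (sb->s_op->alloc_inode).
 * Returns NULL on slab allocation failure or when slab-allocation
 * fault injection fires, which the VFS treats as -ENOMEM.
 */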
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
		return NULL;

	fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	init_f2fs_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_f2fs_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
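
/*
 * ->drop_inode: decide whether an inode whose refcount has hit zero
 * should be evicted immediately (return 1) or kept in the inode cache
 * (return 0). With checkpointing disabled, dirty meta/node inodes are
 * dropped outright; inodes still under writeback are truncated here
 * instead, to avoid the GC deadlock described in the function body.
 */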
static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}

int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
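
/*
 * Counterpart of f2fs_inode_dirtied(): clear FI_DIRTY_INODE once the
 * inode has been written back, unlink it from the global dirty-meta
 * list, and drop the dirty-inode accounting taken above.
 */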
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We mark the inode dirty here so that it is later written back
 * through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->total_valid_inode_count);
	percpu_counter_destroy(&sbi->rf_node_block_count);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
#endif
	}
	kvfree(sbi->devs);
}
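
/*
 * ->put_super: tear down the filesystem at umount. The order matters:
 * sysfs/proc entries and quota files go first, a final CP_UMOUNT
 * checkpoint persists any dirty state, pending discard commands are
 * drained, and only then are the node/segment managers and the
 * remaining caches destroyed.
 */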
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool done;

	/* unregister procfs/sysfs entries in advance to avoid race case */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * Flush all issued checkpoints and stop the checkpoint issue
	 * thread. From then on, any checkpoint is done in each process's
	 * own context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * No checkpoint is needed when the superblock is clean. But if the
	 * previous checkpoint was not done by umount, do a clean checkpoint
	 * again here.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	done = f2fs_issue_discard_timeout(sbi);
	if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * Normally the superblock is clean at this point, so release the
	 * ino entries. This is also needed when an EIO made us skip the
	 * checkpoint above.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in the cp_error case, wait for any remaining writeback pages */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information, if f2fs_write_checkpoint()
	 * above failed with error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	f2fs_destroy_iostat(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}
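
/*
 * ->sync_fs: returns 0 without doing anything once a checkpoint error
 * has been hit or while checkpointing is disabled, and -EAGAIN during
 * power-on recovery; otherwise a sync request issues a checkpoint.
 */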
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync)
		err = f2fs_issue_checkpoint(sbi);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;

	/* Let's flush checkpoints and stop the thread. */
	f2fs_flush_ckpt_thread(F2FS_SB(sb));

	/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
	set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
	return 0;
}
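
/*
 * With CONFIG_QUOTA, statfs output can be clamped to project quota
 * limits: f2fs_statfs_project() below rewrites the block and inode
 * counts for inodes carrying a project id, so df inside a
 * project-quota directory reports the quota-constrained capacity
 * rather than that of the whole filesystem.
 */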
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(struct super_block *sb,
				kprojid_t projid, struct kstatfs *buf)
{
	struct kqid qid;
	struct dquot *dquot;
	u64 limit;
	u64 curblock;

	qid = make_kqid_projid(projid);
	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	spin_lock(&dquot->dq_dqb_lock);

	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
					dquot->dq_dqb.dqb_bhardlimit);
	if (limit)
		limit >>= sb->s_blocksize_bits;

	if (limit && buf->f_blocks > limit) {
		curblock = (dquot->dq_dqb.dqb_curspace +
			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
		buf->f_blocks = limit;
		buf->f_bfree = buf->f_bavail =
			(buf->f_blocks > curblock) ?
			 (buf->f_blocks - curblock) : 0;
	}

	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
					dquot->dq_dqb.dqb_ihardlimit);

	if (limit && buf->f_files > limit) {
		buf->f_files = limit;
		buf->f_ffree =
			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
	}

	spin_unlock(&dquot->dq_dqb_lock);
	dqput(dquot);
	return 0;
}
#endif

static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count;
	u64 avail_node_count;
	unsigned int total_valid_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;

	spin_lock(&sbi->stat_lock);

	user_block_count = sbi->user_block_count;
	total_valid_node_count = valid_node_count(sbi);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
						sbi->current_reserved_blocks;

	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
		buf->f_bfree = 0;
	else
		buf->f_bfree -= sbi->unusable_block_count;
	spin_unlock(&sbi->stat_lock);

	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
		buf->f_bavail = buf->f_bfree -
				F2FS_OPTION(sbi).root_reserved_blocks;
	else
		buf->f_bavail = 0;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - total_valid_node_count,
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

#ifdef CONFIG_QUOTA
	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
	}
#endif
	return 0;
}
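
/*
 * Emit the journalled quota configuration (jqfmt plus the per-type
 * quota file names) in the same comma-separated form it would be
 * passed to mount(8).
 */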
static inline void f2fs_show_quota_options(struct seq_file *seq,
					   struct super_block *sb)
{
#ifdef CONFIG_QUOTA
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (F2FS_OPTION(sbi).s_jquota_fmt) {
		char *fmtname = "";

		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
		case QFMT_VFS_OLD:
			fmtname = "vfsold";
			break;
		case QFMT_VFS_V0:
			fmtname = "vfsv0";
			break;
		case QFMT_VFS_V1:
			fmtname = "vfsv1";
			break;
		}
		seq_printf(seq, ",jqfmt=%s", fmtname);
	}

	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
		seq_show_option(seq, "usrjquota",
			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
		seq_show_option(seq, "grpjquota",
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);

	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		seq_show_option(seq, "prjjquota",
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
static inline void f2fs_show_compress_options(struct seq_file *seq,
					      struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *algtype = "";
	int i;

	if (!f2fs_sb_has_compression(sbi))
		return;

	switch (F2FS_OPTION(sbi).compress_algorithm) {
	case COMPRESS_LZO:
		algtype = "lzo";
		break;
	case COMPRESS_LZ4:
		algtype = "lz4";
		break;
	case COMPRESS_ZSTD:
		algtype = "zstd";
		break;
	case COMPRESS_LZORLE:
		algtype = "lzo-rle";
		break;
	}
	seq_printf(seq, ",compress_algorithm=%s", algtype);

	if (F2FS_OPTION(sbi).compress_level)
		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);

	seq_printf(seq, ",compress_log_size=%u",
			F2FS_OPTION(sbi).compress_log_size);

	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
		seq_printf(seq, ",compress_extension=%s",
			F2FS_OPTION(sbi).extensions[i]);
	}

	for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
		seq_printf(seq, ",nocompress_extension=%s",
			F2FS_OPTION(sbi).noextensions[i]);
	}

	if (F2FS_OPTION(sbi).compress_chksum)
		seq_puts(seq, ",compress_chksum");

	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
		seq_printf(seq, ",compress_mode=%s", "fs");
	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
		seq_printf(seq, ",compress_mode=%s", "user");

	if (test_opt(sbi, COMPRESS_CACHE))
		seq_puts(seq, ",compress_cache");
}
#endif
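
/*
 * ->show_options: reproduce the effective mount options for
 * /proc/mounts. On a compression-enabled image this yields, for
 * example, a fragment like
 *
 *   ...,compress_algorithm=zstd:3,compress_log_size=2,compress_mode=fs,...
 *
 * (illustrative output; the exact set depends on the chosen options).
 */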
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
		seq_printf(seq, ",background_gc=%s", "sync");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
		seq_printf(seq, ",background_gc=%s", "on");
	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
		seq_printf(seq, ",background_gc=%s", "off");

	if (test_opt(sbi, GC_MERGE))
		seq_puts(seq, ",gc_merge");
	else
		seq_puts(seq, ",nogc_merge");

	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (test_opt(sbi, DISCARD)) {
		seq_puts(seq, ",discard");
		if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
			seq_printf(seq, ",discard_unit=%s", "block");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
			seq_printf(seq, ",discard_unit=%s", "segment");
		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
			seq_printf(seq, ",discard_unit=%s", "section");
	} else {
		seq_puts(seq, ",nodiscard");
	}
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
	if (test_opt(sbi, INLINE_XATTR_SIZE))
		seq_printf(seq, ",inline_xattr_size=%u",
					F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	else
		seq_puts(seq, ",noflush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	else
		seq_puts(seq, ",barrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, READ_EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, AGE_EXTENT_CACHE))
		seq_puts(seq, ",age_extent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
		seq_puts(seq, "adaptive");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
		seq_puts(seq, "lfs");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
		seq_puts(seq, "fragment:segment");
	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
		seq_puts(seq, "fragment:block");
	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
	if (test_opt(sbi, RESERVE_ROOT))
		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
				F2FS_OPTION(sbi).root_reserved_blocks,
				from_kuid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resuid),
				from_kgid_munged(&init_user_ns,
					F2FS_OPTION(sbi).s_resgid));
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_bits=%u",
				F2FS_OPTION(sbi).write_io_size_bits);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION)) {
		seq_printf(seq, ",fault_injection=%u",
				F2FS_OPTION(sbi).fault_info.inject_rate);
		seq_printf(seq, ",fault_type=%u",
				F2FS_OPTION(sbi).fault_info.inject_type);
	}
#endif
#ifdef CONFIG_QUOTA
	if (test_opt(sbi, QUOTA))
		seq_puts(seq, ",quota");
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
#endif
	f2fs_show_quota_options(seq, sbi->sb);

	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);

	if (sbi->sb->s_flags & SB_INLINECRYPT)
		seq_puts(seq, ",inlinecrypt");

	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
		seq_printf(seq, ",alloc_mode=%s", "default");
	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		seq_printf(seq, ",alloc_mode=%s", "reuse");

	if (test_opt(sbi, DISABLE_CHECKPOINT))
		seq_printf(seq, ",checkpoint=disable:%u",
				F2FS_OPTION(sbi).unusable_cap);
	if (test_opt(sbi, MERGE_CHECKPOINT))
		seq_puts(seq, ",checkpoint_merge");
	else
		seq_puts(seq, ",nocheckpoint_merge");
	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
		seq_printf(seq, ",fsync_mode=%s", "posix");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
		seq_printf(seq, ",fsync_mode=%s", "strict");
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
		seq_printf(seq, ",fsync_mode=%s", "nobarrier");

#ifdef CONFIG_F2FS_FS_COMPRESSION
	f2fs_show_compress_options(seq, sbi->sb);
#endif

	if (test_opt(sbi, ATGC))
		seq_puts(seq, ",atgc");

	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
		seq_printf(seq, ",memory=%s", "normal");
	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
		seq_printf(seq, ",memory=%s", "low");

	return 0;
}
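
/*
 * Establish the baseline mount options before option parsing runs,
 * both at mount and at remount: inline data/dentry/xattr and the read
 * extent cache are on by default, LFS mode with section-sized discard
 * is forced for zoned devices, and adaptive mode with block-sized
 * discard is used otherwise.
 */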
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;

	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
							SMALL_VOLUME_SEGMENTS)
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
	else
		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
	if (f2fs_sb_has_compression(sbi)) {
		F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
		F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
		F2FS_OPTION(sbi).compress_ext_cnt = 0;
		F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
	}
	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;

	sbi->sb->s_flags &= ~SB_INLINECRYPT;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, READ_EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	set_opt(sbi, MERGE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	if (!f2fs_is_readonly(sbi))
		set_opt(sbi, FLUSH_MERGE);
	if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
		set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi)) {
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
	} else {
		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

	f2fs_build_fault_attr(sbi, 0, 0);
}

#ifdef CONFIG_QUOTA
static int f2fs_enable_quotas(struct super_block *sb);
#endif
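
/*
 * checkpoint=disable support: before checkpointing can be turned off,
 * run foreground GC (in urgent mode, bounded by DISABLE_TIME) until
 * the amount of space that would become unusable without checkpoints
 * drops below the configured cap, then write one CP_PAUSE checkpoint
 * and record the remaining unusable block count.
 */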
static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
{
	unsigned int s_flags = sbi->sb->s_flags;
	struct cp_control cpc;
	unsigned int gc_mode = sbi->gc_mode;
	int err = 0;
	int ret;
	block_t unusable;

	if (s_flags & SB_RDONLY) {
		f2fs_err(sbi, "checkpoint=disable on readonly fs");
		return -EINVAL;
	}
	sbi->sb->s_flags |= SB_ACTIVE;

	/* check if we need more GC first */
	unusable = f2fs_get_unusable_blocks(sbi);
	if (!f2fs_disable_cp_again(sbi, unusable))
		goto skip_gc;

	f2fs_update_time(sbi, DISABLE_TIME);

	sbi->gc_mode = GC_URGENT_HIGH;

	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
		struct f2fs_gc_control gc_control = {
			.victim_segno = NULL_SEGNO,
			.init_gc_type = FG_GC,
			.should_migrate_blocks = false,
			.err_gc_skipped = true,
			.nr_free_secs = 1 };

		f2fs_down_write(&sbi->gc_lock);
		err = f2fs_gc(sbi, &gc_control);
		if (err == -ENODATA) {
			err = 0;
			break;
		}
		if (err && err != -EAGAIN)
			break;
	}

	ret = sync_filesystem(sbi->sb);
	if (ret || err) {
		err = ret ? ret : err;
		goto restore_flag;
	}

	unusable = f2fs_get_unusable_blocks(sbi);
	if (f2fs_disable_cp_again(sbi, unusable)) {
		err = -EAGAIN;
		goto restore_flag;
	}

skip_gc:
	f2fs_down_write(&sbi->gc_lock);
	cpc.reason = CP_PAUSE;
	set_sbi_flag(sbi, SBI_CP_DISABLED);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out_unlock;

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = unusable;
	spin_unlock(&sbi->stat_lock);

out_unlock:
	f2fs_up_write(&sbi->gc_lock);
restore_flag:
	sbi->gc_mode = gc_mode;
	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
	return err;
}

static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
{
	int retry = DEFAULT_RETRY_IO_COUNT;

	/* we should flush all the data to keep data consistency */
	do {
		sync_inodes_sb(sbi->sb);
		f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);

	if (unlikely(retry < 0))
		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");

	f2fs_down_write(&sbi->gc_lock);
	f2fs_dirty_to_prefree(sbi);

	clear_sbi_flag(sbi, SBI_CP_DISABLED);
	set_sbi_flag(sbi, SBI_IS_DIRTY);
	f2fs_up_write(&sbi->gc_lock);

	f2fs_sync_fs(sbi->sb, 1);

	/* Let's ensure there's no pending checkpoint anymore */
	f2fs_flush_ckpt_thread(sbi);
}
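
/*
 * ->remount_fs: re-parse the options on top of the defaults, reject
 * option flips that cannot be changed on a live filesystem (atgc,
 * extent caches, io_bits, compress_cache, discard_unit), then start
 * or stop the GC, checkpoint, flush and discard threads to match the
 * new options. On failure, each thread is put back via the restore_*
 * labels in reverse order of setup.
 */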
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err;
	bool need_restart_gc = false, need_stop_gc = false;
	bool need_restart_ckpt = false, need_stop_ckpt = false;
	bool need_restart_flush = false, need_stop_flush = false;
	bool need_restart_discard = false, need_stop_discard = false;
	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
	bool no_atgc = !test_opt(sbi, ATGC);
	bool no_discard = !test_opt(sbi, DISCARD);
	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
	bool block_unit_discard = f2fs_block_unit_discard(sbi);
#ifdef CONFIG_QUOTA
	int i, j;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;

#ifdef CONFIG_QUOTA
	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			org_mount_opt.s_qf_names[i] =
				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
				GFP_KERNEL);
			if (!org_mount_opt.s_qf_names[i]) {
				for (j = 0; j < i; j++)
					kfree(org_mount_opt.s_qf_names[j]);
				return -ENOMEM;
			}
		} else {
			org_mount_opt.s_qf_names[i] = NULL;
		}
	}
#endif

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
			  err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data, true);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

	if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
		err = -EROFS;
		goto restore_opts;
	}

#ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
		if (err < 0)
			goto restore_opts;
	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
		/* dquot_resume needs RW */
		sb->s_flags &= ~SB_RDONLY;
		if (sb_any_quota_suspended(sb)) {
			dquot_resume(sb, -1);
		} else if (f2fs_sb_has_quota_ino(sbi)) {
			err = f2fs_enable_quotas(sb);
			if (err)
				goto restore_opts;
		}
	}
#endif
	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "LFS is not compatible with IPU");
		goto restore_opts;
	}

	/* disallow enable atgc dynamically */
	if (no_atgc == !!test_opt(sbi, ATGC)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch atgc option is not allowed");
		goto restore_opts;
	}

	/* disallow enable/disable extent_cache dynamically */
	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch extent_cache option is not allowed");
		goto restore_opts;
	}
	/* disallow enable/disable age extent_cache dynamically */
	if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
		goto restore_opts;
	}

	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch io_bits option is not allowed");
		goto restore_opts;
	}

	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch compress_cache option is not allowed");
		goto restore_opts;
	}

	if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
		err = -EINVAL;
		f2fs_warn(sbi, "switch discard_unit option is not allowed");
		goto restore_opts;
	}

	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = -EINVAL;
		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & SB_RDONLY) ||
			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
			!test_opt(sbi, GC_MERGE))) {
		if (sbi->gc_thread) {
			f2fs_stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & SB_RDONLY) {
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
			!test_opt(sbi, MERGE_CHECKPOINT)) {
		f2fs_stop_ckpt_thread(sbi);
		need_restart_ckpt = true;
	} else {
		/* Flush the previous checkpoint, if it exists. */
		f2fs_flush_ckpt_thread(sbi);

		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
			    "Failed to start F2FS issue_checkpoint_thread (%d)",
			    err);
			goto restore_gc;
		}
		need_stop_ckpt = true;
	}

	/*
	 * We stop issue flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
		need_restart_flush = true;
	} else {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			goto restore_ckpt;
		need_stop_flush = true;
	}

	if (no_discard == !!test_opt(sbi, DISCARD)) {
		if (test_opt(sbi, DISCARD)) {
			err = f2fs_start_discard_thread(sbi);
			if (err)
				goto restore_flush;
			need_stop_discard = true;
		} else {
			f2fs_stop_discard_thread(sbi);
			f2fs_issue_discard_timeout(sbi);
			need_restart_discard = true;
		}
	}

	if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
			err = f2fs_disable_checkpoint(sbi);
			if (err)
				goto restore_discard;
		} else {
			f2fs_enable_checkpoint(sbi);
		}
	}

skip:
#ifdef CONFIG_QUOTA
	/* Release old quota file names */
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(org_mount_opt.s_qf_names[i]);
#endif
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);

	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);
	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
	return 0;
restore_discard:
	if (need_restart_discard) {
		if (f2fs_start_discard_thread(sbi))
			f2fs_warn(sbi, "discard has been stopped");
	} else if (need_stop_discard) {
		f2fs_stop_discard_thread(sbi);
	}
restore_flush:
	if (need_restart_flush) {
		if (f2fs_create_flush_cmd_control(sbi))
			f2fs_warn(sbi, "background flush thread has stopped");
	} else if (need_stop_flush) {
		clear_opt(sbi, FLUSH_MERGE);
		f2fs_destroy_flush_cmd_control(sbi, false);
	}
restore_ckpt:
	if (need_restart_ckpt) {
		if (f2fs_start_ckpt_thread(sbi))
			f2fs_warn(sbi, "background ckpt thread has stopped");
	} else if (need_stop_ckpt) {
		f2fs_stop_ckpt_thread(sbi);
	}
restore_gc:
	if (need_restart_gc) {
		if (f2fs_start_gc_thread(sbi))
			f2fs_warn(sbi, "background gc thread has stopped");
	} else if (need_stop_gc) {
		f2fs_stop_gc_thread(sbi);
	}
restore_opts:
#ifdef CONFIG_QUOTA
	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
	for (i = 0; i < MAXQUOTAS; i++) {
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
	}
#endif
	sbi->mount_opt = org_mount_opt;
	sb->s_flags = old_sb_flags;
	return err;
}
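
/*
 * Quota recovery helpers: when a filesystem that needs roll-forward
 * or orphan recovery is mounted read-only, the quota files are
 * temporarily made writable so recovery can update them, and the
 * read-only state is restored afterwards.
 */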
#ifdef CONFIG_QUOTA
static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
{
	/* need to recover orphan inodes */
	if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return true;

	/* need to recover data */
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		return false;
	if (test_opt(sbi, NORECOVERY))
		return false;
	return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
}

static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
{
	bool readonly = f2fs_readonly(sbi->sb);

	if (!f2fs_need_recovery(sbi))
		return false;

	/* it doesn't need to check f2fs_sb_has_readonly() */
	if (f2fs_hw_is_readonly(sbi))
		return false;

	if (readonly) {
		sbi->sb->s_flags &= ~SB_RDONLY;
		set_sbi_flag(sbi, SBI_IS_WRITABLE);
	}

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	return f2fs_enable_quota_files(sbi, readonly);
}

static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
						bool quota_enabled)
{
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);

	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
		clear_sbi_flag(sbi, SBI_IS_WRITABLE);
		sbi->sb->s_flags |= SB_RDONLY;
	}
}

/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	loff_t i_size = i_size_read(inode);
	struct page *page;

	if (off > i_size)
		return 0;

	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
repeat:
		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto repeat;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return PTR_ERR(page);
		}

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			return -EIO;
		}

		memcpy_from_page(data, page, offset, tocopy);
		f2fs_put_page(page, 1);

		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blkidx++;
	}
	return len;
}

/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;
	struct page *page;
	void *fsdata = NULL;
	int err = 0;
	int tocopy;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
retry:
		err = a_ops->write_begin(NULL, mapping, off, tocopy,
							&page, &fsdata);
		if (unlikely(err)) {
			if (err == -ENOMEM) {
				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
			break;
		}

		memcpy_to_page(page, offset, data, tocopy);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
						page, fsdata);
		offset = 0;
		towrite -= tocopy;
		off += tocopy;
		data += tocopy;
		cond_resched();
	}

	if (len == towrite)
		return err;
	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
}
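
/*
 * dquot hooks: f2fs wraps the generic dquot helpers so that fault
 * injection can exercise the init path and so that failures can tag
 * SBI_QUOTA_NEED_REPAIR for a later fsck (see the wrappers further
 * down).
 */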
int f2fs_dquot_initialize(struct inode *inode)
{
	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
		return -ESRCH;

	return dquot_initialize(inode);
}

static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}

static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
{
	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
		return 0;
	}

	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
					F2FS_OPTION(sbi).s_jquota_fmt, type);
}

int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
{
	int enabled = 0;
	int i, err;

	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
		err = f2fs_enable_quotas(sbi->sb);
		if (err) {
			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
			return 0;
		}
		return 1;
	}

	for (i = 0; i < MAXQUOTAS; i++) {
		if (F2FS_OPTION(sbi).s_qf_names[i]) {
			err = f2fs_quota_on_mount(sbi, i);
			if (!err) {
				enabled = 1;
				continue;
			}
			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
				 err, i);
		}
	}
	return enabled;
}

static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
			     unsigned int flags)
{
	struct inode *qf_inode;
	unsigned long qf_inum;
	int err;

	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));

	qf_inum = f2fs_qf_ino(sb, type);
	if (!qf_inum)
		return -EPERM;

	qf_inode = f2fs_iget(sb, qf_inum);
	if (IS_ERR(qf_inode)) {
		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
		return PTR_ERR(qf_inode);
	}

	/* Don't account quota for quota files to avoid recursion */
	qf_inode->i_flags |= S_NOQUOTA;
	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
	iput(qf_inode);
	return err;
}

static int f2fs_enable_quotas(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int type, err = 0;
	unsigned long qf_inum;
	bool quota_mopt[MAXQUOTAS] = {
		test_opt(sbi, USRQUOTA),
		test_opt(sbi, GRPQUOTA),
		test_opt(sbi, PRJQUOTA),
	};

	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
		return 0;
	}

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;

	for (type = 0; type < MAXQUOTAS; type++) {
		qf_inum = f2fs_qf_ino(sb, type);
		if (qf_inum) {
			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
				DQUOT_USAGE_ENABLED |
				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
			if (err) {
				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
					 type, err);
				for (type--; type >= 0; type--)
					dquot_quota_off(sb, type);
				set_sbi_flag(F2FS_SB(sb),
						SBI_QUOTA_NEED_REPAIR);
				return err;
			}
		}
	}
	return 0;
}
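
/*
 * Write one quota type's dirty dquots and its file's dirty pages back
 * to disk. For journalled quota the data pages are left to the next
 * checkpoint; otherwise they are waited on and the page cache is
 * truncated so userspace sees the on-disk state.
 */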
static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
{
	struct quota_info *dqopt = sb_dqopt(sbi->sb);
	struct address_space *mapping = dqopt->files[type]->i_mapping;
	int ret = 0;

	ret = dquot_writeback_dquots(sbi->sb, type);
	if (ret)
		goto out;

	ret = filemap_fdatawrite(mapping);
	if (ret)
		goto out;

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		goto out;

	ret = filemap_fdatawait(mapping);

	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
out:
	if (ret)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret = 0;

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {

		if (type != -1 && cnt != type)
			continue;

		if (!sb_has_quota_active(sb, cnt))
			continue;

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_lock(dqopt->files[cnt]);

		/*
		 * do_quotactl
		 *  f2fs_quota_sync
		 *   f2fs_down_read(quota_sem)
		 *   dquot_writeback_dquots()
		 *    f2fs_dquot_commit
		 *     block_operation
		 *      f2fs_down_read(quota_sem)
		 */
		f2fs_lock_op(sbi);
		f2fs_down_read(&sbi->quota_sem);

		ret = f2fs_quota_sync_file(sbi, cnt);

		f2fs_up_read(&sbi->quota_sem);
		f2fs_unlock_op(sbi);

		if (!f2fs_sb_has_quota_ino(sbi))
			inode_unlock(dqopt->files[cnt]);

		if (ret)
			break;
	}
	return ret;
}

static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;
	int err;

	/* if quota sysfile exists, deny enabling quota with specific file */
	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
		return -EBUSY;
	}

	err = f2fs_quota_sync(sb, type);
	if (err)
		return err;

	err = dquot_quota_on(sb, type, format_id, path);
	if (err)
		return err;

	inode = d_inode(path->dentry);

	inode_lock(inode);
	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);

	return 0;
}

static int __f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	int err;

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	err = f2fs_quota_sync(sb, type);
	if (err)
		goto out_put;

	err = dquot_quota_off(sb, type);
	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
		goto out_put;

	inode_lock(inode);
	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
	f2fs_set_inode_flags(inode);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
out_put:
	iput(inode);
	return err;
}

static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err;

	err = __f2fs_quota_off(sb, type);

	/*
	 * quotactl can shut down journalled quota, which leaves the quota
	 * records inconsistent with fs data updated afterwards; tag the
	 * flag so that fsck is aware of it.
	 */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return err;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
	int type;
	int err;

	for (type = 0; type < MAXQUOTAS; type++) {
		err = __f2fs_quota_off(sb, type);
		if (err) {
			int ret = dquot_quota_off(sb, type);

			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
				 type, err, ret);
			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
		}
	}
	/*
	 * In case of checkpoint=disable, we must flush quota blocks here.
	 * Otherwise end_io could dereference a NULL node_inode, since
	 * put_super will already have dropped it by then.
	 */
	sync_filesystem(sb);
}

static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	for (type = 0; type < MAXQUOTAS; type++) {
		if (!dqopt->files[type])
			continue;
		f2fs_inode_synced(dqopt->files[type]);
	}
}
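
/*
 * The dquot operations below wrap the generic dquot_* helpers, taking
 * sbi->quota_sem where needed and tagging SBI_QUOTA_NEED_REPAIR on
 * failure so that a later fsck knows the quota records may be stale.
 */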
static int f2fs_dquot_commit(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
	ret = dquot_commit(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_acquire(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret;

	f2fs_down_read(&sbi->quota_sem);
	ret = dquot_acquire(dquot);
	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	f2fs_up_read(&sbi->quota_sem);
	return ret;
}

static int f2fs_dquot_release(struct dquot *dquot)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
	int ret = dquot_release(dquot);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_mark_dquot_dirty(dquot);

	/* if we are using journalled quota */
	if (is_journalled_quota(sbi))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
	return ret;
}

static int f2fs_dquot_commit_info(struct super_block *sb, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int ret = dquot_commit_info(sb, type);

	if (ret < 0)
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	return ret;
}

static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}

static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space	= f2fs_get_reserved_space,
	.write_dquot		= f2fs_dquot_commit,
	.acquire_dquot		= f2fs_dquot_acquire,
	.release_dquot		= f2fs_dquot_release,
	.mark_dirty		= f2fs_dquot_mark_dquot_dirty,
	.write_info		= f2fs_dquot_commit_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= f2fs_get_projid,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};
#else
int f2fs_dquot_initialize(struct inode *inode)
{
	return 0;
}

int f2fs_quota_sync(struct super_block *sb, int type)
{
	return 0;
}

void f2fs_quota_off_umount(struct super_block *sb)
{
}
#endif

static const struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.free_inode	= f2fs_free_inode,
	.drop_inode	= f2fs_drop_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
#endif
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
  2766. #ifdef CONFIG_FS_ENCRYPTION
  2767. static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
  2768. {
  2769. return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
  2770. F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
  2771. ctx, len, NULL);
  2772. }
  2773. static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
  2774. void *fs_data)
  2775. {
  2776. struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
  2777. /*
  2778. * Encrypting the root directory is not allowed because fsck
  2779. * expects lost+found directory to exist and remain unencrypted
  2780. * if LOST_FOUND feature is enabled.
  2781. *
  2782. */
  2783. if (f2fs_sb_has_lost_found(sbi) &&
  2784. inode->i_ino == F2FS_ROOT_INO(sbi))
  2785. return -EPERM;
  2786. return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
  2787. F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
  2788. ctx, len, fs_data, XATTR_CREATE);
  2789. }
  2790. static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
  2791. {
  2792. return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
  2793. }
  2794. static bool f2fs_has_stable_inodes(struct super_block *sb)
  2795. {
  2796. return true;
  2797. }
  2798. static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
  2799. int *ino_bits_ret, int *lblk_bits_ret)
  2800. {
  2801. *ino_bits_ret = 8 * sizeof(nid_t);
  2802. *lblk_bits_ret = 8 * sizeof(block_t);
  2803. }
  2804. static struct block_device **f2fs_get_devices(struct super_block *sb,
  2805. unsigned int *num_devs)
  2806. {
  2807. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2808. struct block_device **devs;
  2809. int i;
  2810. if (!f2fs_is_multi_device(sbi))
  2811. return NULL;
  2812. devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
  2813. if (!devs)
  2814. return ERR_PTR(-ENOMEM);
  2815. for (i = 0; i < sbi->s_ndevs; i++)
  2816. devs[i] = FDEV(i).bdev;
  2817. *num_devs = sbi->s_ndevs;
  2818. return devs;
  2819. }
  2820. static const struct fscrypt_operations f2fs_cryptops = {
  2821. .flags = FS_CFLG_SUPPORTS_SUBBLOCK_DATA_UNITS,
  2822. .key_prefix = "f2fs:",
  2823. .get_context = f2fs_get_context,
  2824. .set_context = f2fs_set_context,
  2825. .get_dummy_policy = f2fs_get_dummy_policy,
  2826. .empty_dir = f2fs_empty_dir,
  2827. .has_stable_inodes = f2fs_has_stable_inodes,
  2828. .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
  2829. .get_devices = f2fs_get_devices,
  2830. };
  2831. #endif
  2832. static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
  2833. u64 ino, u32 generation)
  2834. {
  2835. struct f2fs_sb_info *sbi = F2FS_SB(sb);
  2836. struct inode *inode;
  2837. if (f2fs_check_nid_range(sbi, ino))
  2838. return ERR_PTR(-ESTALE);
  2839. /*
  2840. * f2fs_iget isn't quite right if the inode is currently unallocated!
  2841. * However f2fs_iget currently does appropriate checks to handle stale
  2842. * inodes so everything is OK.
  2843. */
  2844. inode = f2fs_iget(sb, ino);
  2845. if (IS_ERR(inode))
  2846. return ERR_CAST(inode);
  2847. if (unlikely(generation && inode->i_generation != generation)) {
  2848. /* we didn't find the right inode.. */
  2849. iput(inode);
  2850. return ERR_PTR(-ESTALE);
  2851. }
  2852. return inode;
  2853. }
  2854. static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
  2855. int fh_len, int fh_type)
  2856. {
  2857. return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
  2858. f2fs_nfs_get_inode);
  2859. }
  2860. static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
  2861. int fh_len, int fh_type)
  2862. {
  2863. return generic_fh_to_parent(sb, fid, fh_len, fh_type,
  2864. f2fs_nfs_get_inode);
  2865. }
  2866. static const struct export_operations f2fs_export_ops = {
  2867. .fh_to_dentry = f2fs_fh_to_dentry,
  2868. .fh_to_parent = f2fs_fh_to_parent,
  2869. .get_parent = f2fs_get_parent,
  2870. };
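
/*
 * Compute the maximum file size, in blocks, reachable through the inode
 * block-address tree: two direct node blocks, two indirect levels and one
 * double-indirect level, capped for IV_INO_LBLK_{64,32} encryption
 * compatibility.
 */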
loff_t max_file_blocks(struct inode *inode)
{
	loff_t result = 0;
	loff_t leaf_count;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign result as zero.
	 */

	if (inode && f2fs_compressed_file(inode))
		leaf_count = ADDRS_PER_BLOCK(inode);
	else
		leaf_count = DEF_ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	/*
	 * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
	 * a 4K crypto data unit, we must restrict the max filesize to what can
	 * fit within U32_MAX + 1 data units.
	 */
	result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);

	return result;
}

static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
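
/*
 * Verify that the CP, SIT, NAT, SSA and MAIN areas recorded in the raw
 * superblock are contiguous and fall within the segment range; returns
 * true when the layout is inconsistent. A too-small MAIN area is repaired
 * by shrinking segment_count, in memory and, when writable, on disk.
 */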
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			  segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			  cp_blkaddr, sit_blkaddr,
			  segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			  sit_blkaddr, nat_blkaddr,
			  segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			  nat_blkaddr, ssa_blkaddr,
			  segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			  ssa_blkaddr, main_blkaddr,
			  segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
			  main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
			  res, main_blkaddr, seg_end_blkaddr,
			  segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
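
/*
 * Validate one raw superblock copy: magic, optional checksum, block and
 * sector size limits, segment/section geometry, per-device segment counts
 * and reserved inode numbers, then the area boundaries checked above.
 */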
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	size_t crc_offset = 0;
	__u32 crc = 0;

	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return -EINVAL;
	}

	/* Check checksum_offset and crc in superblock */
	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
		crc_offset = le32_to_cpu(raw_super->checksum_offset);
		if (crc_offset !=
			offsetof(struct f2fs_super_block, crc)) {
			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
				  crc_offset);
			return -EFSCORRUPTED;
		}
		crc = le32_to_cpu(raw_super->crc);
		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
			return -EFSCORRUPTED;
		}
	}

	/* Currently, support only 4KB block size */
	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
			  le32_to_cpu(raw_super->log_blocksize),
			  F2FS_BLKSIZE_BITS);
		return -EFSCORRUPTED;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
			  le32_to_cpu(raw_super->log_blocks_per_seg));
		return -EFSCORRUPTED;
	}

	/* Currently, support 512/1024/2048/4096/16K bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectorsize (%u)",
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
			  le32_to_cpu(raw_super->log_sectors_per_block),
			  le32_to_cpu(raw_super->log_sectorsize));
		return -EFSCORRUPTED;
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
		return -EFSCORRUPTED;
	}

	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if (segment_count_main != total_sections * segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
			  segment_count_main, total_sections, segs_per_sec);
		return -EFSCORRUPTED;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
			  segment_count, segs_per_sec, total_sections);
		return -EFSCORRUPTED;
	}

	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
			  segment_count, le64_to_cpu(raw_super->block_count));
		return -EFSCORRUPTED;
	}

	if (RDEV(0).path[0]) {
		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
		int i = 1;

		while (i < MAX_DEVICES && RDEV(i).path[0]) {
			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
			i++;
		}
		if (segment_count != dev_seg_count) {
			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
				  segment_count, dev_seg_count);
			return -EFSCORRUPTED;
		}
	} else {
		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
					!bdev_is_zoned(sbi->sb->s_bdev)) {
			f2fs_info(sbi, "Zoned block device path is missing");
			return -EFSCORRUPTED;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
			  secs_per_zone, total_sections);
		return -EFSCORRUPTED;
	}
	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
			(le32_to_cpu(raw_super->extension_count) +
			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
			  le32_to_cpu(raw_super->extension_count),
			  raw_super->hot_ext_count,
			  F2FS_MAX_EXTENSION);
		return -EFSCORRUPTED;
	}

	if (le32_to_cpu(raw_super->cp_payload) >=
			(blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_PERSIST_TYPE)) {
		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
			  le32_to_cpu(raw_super->cp_payload),
			  blocks_per_seg - F2FS_CP_PACKS -
			  NR_CURSEG_PERSIST_TYPE);
		return -EFSCORRUPTED;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			  le32_to_cpu(raw_super->node_ino),
			  le32_to_cpu(raw_super->meta_ino),
			  le32_to_cpu(raw_super->root_ino));
		return -EFSCORRUPTED;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return -EFSCORRUPTED;

	return 0;
}
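
/*
 * Cross-check the checkpoint against the superblock: metadata segment
 * budget, block/node counts, current segment numbers (no duplicates),
 * bitmap sizes and cp_pack layout. Returns 1 when fsck is required.
 */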
int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count, valid_user_blocks;
	block_t avail_node_count, valid_node_count;
	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
	int i, j;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (!f2fs_sb_has_readonly(sbi) &&
			unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}
	user_block_count = le64_to_cpu(ckpt->user_block_count);
	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		f2fs_err(sbi, "Wrong user_block_count: %u",
			 user_block_count);
		return 1;
	}

	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
	if (valid_user_blocks > user_block_count) {
		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
			 valid_user_blocks, user_block_count);
		return 1;
	}

	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	if (valid_node_count > avail_node_count) {
		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
			 valid_node_count, avail_node_count);
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;

		if (f2fs_sb_has_readonly(sbi))
			goto check_data;

		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
check_data:
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;

		if (f2fs_sb_has_readonly(sbi))
			goto skip_cross;

		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_data_segno[i]));
				return 1;
			}
		}
	}
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u",
					 i, j,
					 le32_to_cpu(ckpt->cur_node_segno[i]));
				return 1;
			}
		}
	}
skip_cross:
	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
			 sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_PERSIST_TYPE) {
		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
			 cp_pack_start_sum);
		return 1;
	}

	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
			  le32_to_cpu(ckpt->checksum_offset));
		return 1;
	}

	nat_blocks = nat_segs << log_blocks_per_seg;
	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
		(cp_payload + F2FS_CP_PACKS +
		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
			  cp_payload, nat_bits_blocks);
		return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_err(sbi, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
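
/* Seed sbi fields from the raw superblock and set runtime defaults. */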
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = BIT(sbi->log_blocksize);
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->gc_mode = GC_NORMAL;
	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
	sbi->migration_granularity = sbi->segs_per_sec;
	sbi->seq_file_ra_mul = MIN_RA_MUL;
	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
	spin_lock_init(&sbi->gc_remaining_trials_lock);
	atomic64_set(&sbi->current_atomic_write, 0);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
				DEF_UMOUNT_DISCARD_TIMEOUT;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	for (i = 0; i < META; i++)
		atomic_set(&sbi->wb_sync_req[i], 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	init_f2fs_rwsem(&sbi->io_order_lock);
	spin_lock_init(&sbi->cp_lock);

	sbi->dirty_device = 0;
	spin_lock_init(&sbi->dev_lock);

	init_f2fs_rwsem(&sbi->sb_lock);
	init_f2fs_rwsem(&sbi->pin_sem);
}

static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
	if (err)
		goto err_valid_block;

	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
	if (err)
		goto err_node_block;
	return 0;

err_node_block:
	percpu_counter_destroy(&sbi->rf_node_block_count);
err_valid_block:
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	return err;
}

#ifdef CONFIG_BLK_DEV_ZONED

struct f2fs_report_zones_args {
	struct f2fs_sb_info *sbi;
	struct f2fs_dev_info *dev;
};
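
/*
 * Zone-report callback: record sequential zones in the device's blkz_seq
 * bitmap and require a single, uniform usable zone capacity across zones.
 */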
static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	struct f2fs_report_zones_args *rz_args = data;
	block_t unusable_blocks = (zone->len - zone->capacity) >>
					F2FS_LOG_SECTORS_PER_BLOCK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return 0;

	set_bit(idx, rz_args->dev->blkz_seq);
	if (!rz_args->sbi->unusable_blocks_per_sec) {
		rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
		return 0;
	}
	if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
		f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n");
		return -EINVAL;
	}
	return 0;
}

static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev_nr_sectors(bdev);
	struct f2fs_report_zones_args rep_zone_arg;
	u64 zone_sectors;
	int ret;

	if (!f2fs_sb_has_blkzoned(sbi))
		return 0;

	zone_sectors = bdev_zone_sectors(bdev);
	if (!is_power_of_2(zone_sectors)) {
		f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
		return -EINVAL;
	}

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(zone_sectors))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
	FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
					sbi->blocks_per_blkz);
	if (nr_sectors & (zone_sectors - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
					BITS_TO_LONGS(FDEV(devi).nr_blkz)
					* sizeof(unsigned long),
					GFP_KERNEL);
	if (!FDEV(devi).blkz_seq)
		return -ENOMEM;

	rep_zone_arg.sbi = sbi;
	rep_zone_arg.dev = &FDEV(devi);

	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
				  &rep_zone_arg);
	if (ret < 0)
		return ret;
	return 0;
}
#endif

/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either copy is broken, pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_err(sbi, "Unable to read %dth superblock",
				 block + 1);
			err = -EIO;
			*recovery = 1;
			continue;
		}

		/* sanity checking of raw super */
		err = sanity_check_raw_super(sbi, bh);
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			brelse(bh);
			*recovery = 1;
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
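
/*
 * Write both superblock copies: refresh the CRC when the checksum feature
 * is on, commit the backup copy first and, unless recovering, the current
 * valid copy as well.
 */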
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	__u32 crc = 0;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				f2fs_hw_is_readonly(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* we should update superblock crc here */
	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
				offsetof(struct f2fs_super_block, crc));
		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
	}

	/* write back-up superblock first */
	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_bread(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
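
/*
 * Persist a filesystem-stop reason in the superblock, saturating the
 * per-reason counter at its byte-sized maximum.
 */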
void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	int err;

	f2fs_down_write(&sbi->sb_lock);

	if (raw_super->s_stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
		raw_super->s_stop_reason[reason]++;

	err = f2fs_commit_super(sbi, false);
	if (err)
		f2fs_err(sbi, "f2fs_commit_super fails to record reason:%u err:%d",
			 reason, err);
	f2fs_up_write(&sbi->sb_lock);
}

void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
{
	spin_lock(&sbi->error_lock);
	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
		set_bit(flag, (unsigned long *)sbi->errors);
		sbi->error_dirty = true;
	}
	spin_unlock(&sbi->error_lock);
}

static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
{
	bool need_update = false;

	spin_lock(&sbi->error_lock);
	if (sbi->error_dirty) {
		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
							MAX_F2FS_ERRORS);
		sbi->error_dirty = false;
		need_update = true;
	}
	spin_unlock(&sbi->error_lock);

	return need_update;
}

void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
{
	int err;

	f2fs_save_errors(sbi, error);

	f2fs_down_write(&sbi->sb_lock);

	if (!f2fs_update_errors(sbi))
		goto out_unlock;

	err = f2fs_commit_super(sbi, false);
	if (err)
		f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d",
			 error, err);
out_unlock:
	f2fs_up_write(&sbi->sb_lock);
}
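
/*
 * Open every backing device named in the superblock (or the lone zoned
 * device), compute each device's block range and gather zone information.
 */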
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;
	unsigned int logical_blksize;
	int i;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))
			return 0;
		max_devices = 1;
	}

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = f2fs_kzalloc(sbi,
				 array_size(max_devices,
					    sizeof(struct f2fs_dev_info)),
				 GFP_KERNEL);
	if (!sbi->devs)
		return -ENOMEM;

	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
	sbi->aligned_blksize = true;

	for (i = 0; i < max_devices; i++) {
		if (i > 0 && !RDEV(i).path[0])
			break;

		if (max_devices == 1) {
			/* Single zoned block device mount */
			FDEV(0).bdev =
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);
		} else {
			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);
			if (i == 0) {
				FDEV(i).start_blk = 0;
				FDEV(i).end_blk = FDEV(i).start_blk +
				    (FDEV(i).total_segments <<
				    sbi->log_blocks_per_seg) - 1 +
				    le32_to_cpu(raw_super->segment0_blkaddr);
			} else {
				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
				FDEV(i).end_blk = FDEV(i).start_blk +
					(FDEV(i).total_segments <<
					sbi->log_blocks_per_seg) - 1;
			}
			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);
		}
		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

		if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
			sbi->aligned_blksize = false;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_has_blkzoned(sbi)) {
			f2fs_err(sbi, "Zoned block device feature not enabled");
			return -EINVAL;
		}
		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
				return -EINVAL;
			}
			if (max_devices == 1)
				break;
			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				  i, FDEV(i).path,
				  FDEV(i).total_segments,
				  FDEV(i).start_blk, FDEV(i).end_blk,
				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				  "Host-aware" : "Host-managed");
			continue;
		}
#endif
		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
			  i, FDEV(i).path,
			  FDEV(i).total_segments,
			  FDEV(i).start_blk, FDEV(i).end_blk);
	}
	f2fs_info(sbi,
		  "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi));
	return 0;
}
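
/*
 * Load the Unicode casefolding table requested by the superblock, or
 * refuse the mount when the kernel lacks CONFIG_UNICODE support.
 */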
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
#if IS_ENABLED(CONFIG_UNICODE)
	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
		const struct f2fs_sb_encodings *encoding_info;
		struct unicode_map *encoding;
		__u16 encoding_flags;

		encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
		if (!encoding_info) {
			f2fs_err(sbi,
				 "Encoding requested by superblock is unknown");
			return -EINVAL;
		}

		encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
		encoding = utf8_load(encoding_info->version);
		if (IS_ERR(encoding)) {
			f2fs_err(sbi,
				 "can't mount with superblock charset: %s-%u.%u.%u "
				 "not supported by the kernel. flags: 0x%x.",
				 encoding_info->name,
				 unicode_major(encoding_info->version),
				 unicode_minor(encoding_info->version),
				 unicode_rev(encoding_info->version),
				 encoding_flags);
			return PTR_ERR(encoding);
		}
		f2fs_info(sbi, "Using encoding defined by superblock: "
			  "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
			  unicode_major(encoding_info->version),
			  unicode_minor(encoding_info->version),
			  unicode_rev(encoding_info->version),
			  encoding_flags);

		sbi->sb->s_encoding = encoding;
		sbi->sb->s_encoding_flags = encoding_flags;
	}
#else
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	return 0;
}

static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
{
	/* adjust parameters according to the volume size */
	if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
		if (f2fs_block_unit_discard(sbi))
			SM_I(sbi)->dcc_info->discard_granularity =
						MIN_DISCARD_GRANULARITY;
		if (!f2fs_lfs_mode(sbi))
			SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
						BIT(F2FS_IPU_HONOR_OPU_WRITE);
	}

	sbi->readdir_ra = true;
}
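
/*
 * Core mount path: read and validate the superblock, parse options, set
 * up caches, the device list and the segment/node managers, recover
 * fsynced data if needed, and finally expose the root dentry. The error
 * labels unwind in strict reverse order of initialization.
 */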
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool skip_recovery = false, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;
	int retry_cnt = 1;
#ifdef CONFIG_QUOTA
	bool quota_enabled = false;
#endif

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* initialize locks within allocated memory */
	init_f2fs_rwsem(&sbi->gc_lock);
	mutex_init(&sbi->writepages);
	init_f2fs_rwsem(&sbi->cp_global_sem);
	init_f2fs_rwsem(&sbi->node_write);
	init_f2fs_rwsem(&sbi->node_change);
	spin_lock_init(&sbi->stat_lock);
	init_f2fs_rwsem(&sbi->cp_rwsem);
	init_f2fs_rwsem(&sbi->quota_sem);
	init_waitqueue_head(&sbi->cp_wait);
	spin_lock_init(&sbi->error_lock);

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}
	mutex_init(&sbi->flush_lock);

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_err(sbi, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_err(sbi, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
				   &recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);

	/* precompute checksum seed for metadata */
	if (f2fs_sb_has_inode_chksum(sbi))
		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
						 sizeof(raw_super->uuid));

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options, false);
	if (err)
		goto free_options;

	sb->s_maxbytes = max_file_blocks(NULL) <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;

	err = f2fs_setup_casefold(sbi);
	if (err)
		goto free_options;

#ifdef CONFIG_QUOTA
	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	if (f2fs_sb_has_quota_ino(sbi)) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if (f2fs_qf_ino(sbi->sb, i))
				sbi->nquota_files++;
		}
	}
#endif

	sb->s_op = &f2fs_sops;
#ifdef CONFIG_FS_ENCRYPTION
	sb->s_cop = &f2fs_cryptops;
#endif
#ifdef CONFIG_FS_VERITY
	sb->s_vop = &f2fs_verityops;
#endif
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
	sb->s_iflags |= SB_I_CGROUPWB;

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);

	err = f2fs_init_write_merge_io(sbi);
	if (err)
		goto free_bio_info;

	init_sb_info(sbi);

	err = f2fs_init_iostat(sbi);
	if (err)
		goto free_bio_info;

	err = init_percpu_info(sbi);
	if (err)
		goto free_iostat;

	if (F2FS_IO_ALIGNED(sbi)) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {
			err = -ENOMEM;
			goto free_percpu;
		}
	}

	/* init per sbi slab cache */
	err = f2fs_init_xattr_caches(sbi);
	if (err)
		goto free_io_dummy;

	err = f2fs_init_page_array_cache(sbi);
	if (err)
		goto free_xattr_cache;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_err(sbi, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_page_array_cache;
	}

	err = f2fs_get_valid_checkpoint(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
	}

	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
		set_sbi_flag(sbi, SBI_NEED_FSCK);

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to find devices");
		goto free_devices;
	}

	err = f2fs_init_post_read_wq(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize post read workqueue");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;
	sbi->current_reserved_blocks = 0;
	limit_reserve_root(sbi);
	adjust_unusable_cap_perc(sbi);

	f2fs_init_extent_cache_info(sbi);

	f2fs_init_ino_entry_info(sbi);

	f2fs_init_fsync_node_info(sbi);

	/* setup checkpoint request control and start checkpoint issue thread */
	f2fs_init_ckpt_req_control(sbi);
	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
			test_opt(sbi, MERGE_CHECKPOINT)) {
		err = f2fs_start_ckpt_thread(sbi);
		if (err) {
			f2fs_err(sbi,
				 "Failed to start F2FS issue_checkpoint_thread (%d)",
				 err);
			goto stop_ckpt_thread;
		}
	}

	/* setup f2fs internal modules */
	err = f2fs_build_segment_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
			 err);
		goto free_sm;
	}
	err = f2fs_build_node_manager(sbi);
	if (err) {
		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
			 err);
		goto free_nm;
	}

	err = adjust_reserved_segment(sbi);
	if (err)
		goto free_nm;

	/* For write statistics */
	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	f2fs_build_gc_manager(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_err(sbi, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_stats;
	}

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_err(sbi, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
			!root->i_size || !root->i_nlink) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_node_inode;
	}

	err = f2fs_init_compress_inode(sbi);
	if (err)
		goto free_root_inode;

	err = f2fs_register_sysfs(sbi);
	if (err)
		goto free_compress_inode;

#ifdef CONFIG_QUOTA
	/* Enable quota usage during mount */
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
		err = f2fs_enable_quotas(sb);
		if (err)
			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
	}

	quota_enabled = f2fs_recover_quota_begin(sbi);
#endif

	/* if there are any orphan inodes, free them */
	err = f2fs_recover_orphan_inodes(sbi);
	if (err)
		goto free_meta;

	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
		goto reset_checkpoint;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
			!test_opt(sbi, NORECOVERY)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not taken by a clean system shutdown.
		 */
		if (f2fs_hw_is_readonly(sbi)) {
			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
				err = f2fs_recover_fsync_data(sbi, true);
				if (err > 0) {
					err = -EROFS;
					f2fs_err(sbi, "Need to recover fsync data, but "
						 "write access unavailable, please try "
						 "mount w/ disable_roll_forward or norecovery");
				}
				if (err < 0)
					goto free_meta;
			}
			f2fs_info(sbi, "write access unavailable, skipping recovery");
			goto reset_checkpoint;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (skip_recovery)
			goto reset_checkpoint;

		err = f2fs_recover_fsync_data(sbi, false);
		if (err < 0) {
			if (err != -ENOMEM)
				skip_recovery = true;
			need_fsck = true;
			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
				 err);
			goto free_meta;
		}
	} else {
		err = f2fs_recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_err(sbi, "Need to recover fsync data");
			goto free_meta;
		}
	}

#ifdef CONFIG_QUOTA
	f2fs_recover_quota_end(sbi, quota_enabled);
#endif

	/*
	 * If the f2fs is not readonly and fsync data recovery succeeds,
	 * check zoned block devices' write pointer consistency.
	 */
	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_check_write_pointer(sbi);
		if (err)
			goto free_meta;
	}

reset_checkpoint:
	f2fs_init_inmem_curseg(sbi);

	/* f2fs_recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		err = f2fs_disable_checkpoint(sbi);
		if (err)
			goto sync_free_meta;
	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
		f2fs_enable_checkpoint(sbi);
	}

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = f2fs_start_gc_thread(sbi);
		if (err)
			goto sync_free_meta;
	}
	kvfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
			  sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_join_shrinker(sbi);

	f2fs_tuning_parameters(sbi);

	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
		    cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
	cleancache_init_fs(sb);
	return 0;

sync_free_meta:
	/* safe to flush all the data */
	sync_filesystem(sbi->sb);
	retry_cnt = 0;

free_meta:
#ifdef CONFIG_QUOTA
	f2fs_truncate_quota_inode_pages(sb);
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
		f2fs_quota_off_umount(sbi->sb);
#endif
	/*
	 * Some dirty meta pages can be produced by f2fs_recover_orphan_inodes()
	 * failing with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(),
	 * which falls into an infinite loop in f2fs_sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	/* evict some inodes being cached by GC */
	evict_inodes(sb);
	f2fs_unregister_sysfs(sbi);
free_compress_inode:
	f2fs_destroy_compress_inode(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	f2fs_release_ino_entry(sbi, true);
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	iput(sbi->node_inode);
	sbi->node_inode = NULL;
free_stats:
	f2fs_destroy_stats(sbi);
free_nm:
	/* stop discard thread before destroying node manager */
	f2fs_stop_discard_thread(sbi);
	f2fs_destroy_node_manager(sbi);
free_sm:
	f2fs_destroy_segment_manager(sbi);
stop_ckpt_thread:
	f2fs_stop_ckpt_thread(sbi);
	f2fs_destroy_post_read_wq(sbi);
free_devices:
	destroy_device_list(sbi);
	kvfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;
free_page_array_cache:
	f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
	f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_percpu:
	destroy_percpu_info(sbi);
free_iostat:
	f2fs_destroy_iostat(sbi);
free_bio_info:
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);

#if IS_ENABLED(CONFIG_UNICODE)
	utf8_unload(sb->s_encoding);
	sb->s_encoding = NULL;
#endif
free_options:
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	kvfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one another chance */
	if (retry_cnt > 0 && skip_recovery) {
		retry_cnt--;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}

static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
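
/*
 * Unmount hook: stop background threads, write a final CP_UMOUNT
 * checkpoint when the image is dirty, then tear down the block super.
 */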
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root) {
		struct f2fs_sb_info *sbi = F2FS_SB(sb);

		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_stop_gc_thread(sbi);
		f2fs_stop_discard_thread(sbi);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		/*
		 * A later evict_inode() can bypass checking and invalidating
		 * the compress inode cache.
		 */
		if (test_opt(sbi, COMPRESS_CACHE))
			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
#endif

		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			struct cp_control cpc = {
				.reason = CP_UMOUNT,
			};

			f2fs_write_checkpoint(sbi, &cpc);
		}

		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
			sb->s_flags &= ~SB_RDONLY;
	}
	kill_block_super(sb);
}

static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("f2fs");

static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	return f2fs_inode_cachep ? 0 : -ENOMEM;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}

static int __init init_f2fs_fs(void)
{
	int err;

	if (PAGE_SIZE != F2FS_BLKSIZE) {
		printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
		       PAGE_SIZE, F2FS_BLKSIZE);
		return -EINVAL;
	}

	err = init_inodecache();
	if (err)
		goto fail;
	err = f2fs_create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = f2fs_create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = f2fs_create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = f2fs_create_recovery_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_create_extent_cache();
	if (err)
		goto free_recovery_cache;
	err = f2fs_create_garbage_collection_cache();
	if (err)
		goto free_extent_cache;
	err = f2fs_init_sysfs();
	if (err)
		goto free_garbage_collection_cache;
	err = register_shrinker(&f2fs_shrinker_info, "f2fs-shrinker");
	if (err)
		goto free_sysfs;
	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	f2fs_create_root_stats();
	err = f2fs_init_post_read_processing();
	if (err)
		goto free_root_stats;
	err = f2fs_init_iostat_processing();
	if (err)
		goto free_post_read;
	err = f2fs_init_bio_entry_cache();
	if (err)
		goto free_iostat;
	err = f2fs_init_bioset();
	if (err)
		goto free_bio_entry_cache;
	err = f2fs_init_compress_mempool();
	if (err)
		goto free_bioset;
	err = f2fs_init_compress_cache();
	if (err)
		goto free_compress_mempool;
	err = f2fs_create_casefold_cache();
	if (err)
		goto free_compress_cache;
	return 0;
free_compress_cache:
	f2fs_destroy_compress_cache();
free_compress_mempool:
	f2fs_destroy_compress_mempool();
free_bioset:
	f2fs_destroy_bioset();
free_bio_entry_cache:
	f2fs_destroy_bio_entry_cache();
free_iostat:
	f2fs_destroy_iostat_processing();
free_post_read:
	f2fs_destroy_post_read_processing();
free_root_stats:
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_sysfs:
	f2fs_exit_sysfs();
free_garbage_collection_cache:
	f2fs_destroy_garbage_collection_cache();
free_extent_cache:
	f2fs_destroy_extent_cache();
free_recovery_cache:
	f2fs_destroy_recovery_cache();
free_checkpoint_caches:
	f2fs_destroy_checkpoint_caches();
free_segment_manager_caches:
	f2fs_destroy_segment_manager_caches();
free_node_manager_caches:
	f2fs_destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}

static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_casefold_cache();
	f2fs_destroy_compress_cache();
	f2fs_destroy_compress_mempool();
	f2fs_destroy_bioset();
	f2fs_destroy_bio_entry_cache();
	f2fs_destroy_iostat_processing();
	f2fs_destroy_post_read_processing();
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_exit_sysfs();
	f2fs_destroy_garbage_collection_cache();
	f2fs_destroy_extent_cache();
	f2fs_destroy_recovery_cache();
	f2fs_destroy_checkpoint_caches();
	f2fs_destroy_segment_manager_caches();
	f2fs_destroy_node_manager_caches();
	destroy_inodecache();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crc32");